author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 07:54:09 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 07:54:09 +0000
commit     b91227758bfa7527946b3e8530614724928005ac (patch)
tree       3ddfa5e0c43164b29f5682a9c94b09b34def2ee1
parent     Adding upstream version 2024.03.10. (diff)
Adding upstream version 2024.04.09.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--  .github/banner.svg | 10
-rw-r--r--  .github/workflows/build.yml | 29
-rw-r--r--  .github/workflows/core.yml | 2
-rw-r--r--  .github/workflows/quick-test.yml | 2
-rw-r--r--  .github/workflows/release.yml | 8
-rw-r--r--  CONTRIBUTORS | 10
-rw-r--r--  Changelog.md | 95
-rw-r--r--  Makefile | 22
-rw-r--r--  README.md | 49
-rwxr-xr-x  bundle/py2exe.py | 2
-rw-r--r--  devscripts/changelog_override.json | 21
-rwxr-xr-x  devscripts/install_deps.py | 38
-rw-r--r--  devscripts/make_changelog.py | 51
-rw-r--r--  devscripts/prepare_manpage.py | 29
-rwxr-xr-x  devscripts/tomlparse.py | 10
-rw-r--r--  devscripts/update-version.py | 4
-rwxr-xr-x  devscripts/update_changelog.py | 26
-rw-r--r--  pyproject.toml | 8
-rw-r--r--  supportedsites.md | 13
-rw-r--r--  test/test_YoutubeDL.py | 4
-rw-r--r--  test/test_cookies.py | 4
-rw-r--r--  test/test_networking.py | 632
-rw-r--r--  test/test_socks.py | 33
-rw-r--r--  test/test_traversal.py | 444
-rw-r--r--  test/test_utils.py | 383
-rw-r--r--  test/test_websockets.py | 53
-rw-r--r--  yt_dlp/YoutubeDL.py | 70
-rw-r--r--  yt_dlp/__init__.py | 42
-rw-r--r--  yt_dlp/__pyinstaller/hook-yt_dlp.py | 6
-rw-r--r--  yt_dlp/compat/__init__.py | 9
-rw-r--r--  yt_dlp/cookies.py | 10
-rw-r--r--  yt_dlp/dependencies/__init__.py | 4
-rw-r--r--  yt_dlp/downloader/common.py | 11
-rw-r--r--  yt_dlp/downloader/external.py | 4
-rw-r--r--  yt_dlp/extractor/_extractors.py | 9
-rw-r--r--  yt_dlp/extractor/afreecatv.py | 404
-rw-r--r--  yt_dlp/extractor/ard.py | 4
-rw-r--r--  yt_dlp/extractor/asobistage.py | 154
-rw-r--r--  yt_dlp/extractor/atvat.py | 8
-rw-r--r--  yt_dlp/extractor/aws.py | 4
-rw-r--r--  yt_dlp/extractor/bibeltv.py | 4
-rw-r--r--  yt_dlp/extractor/box.py | 37
-rw-r--r--  yt_dlp/extractor/bundestag.py | 8
-rw-r--r--  yt_dlp/extractor/cbc.py | 86
-rw-r--r--  yt_dlp/extractor/cda.py | 6
-rw-r--r--  yt_dlp/extractor/common.py | 69
-rw-r--r--  yt_dlp/extractor/crunchyroll.py | 143
-rw-r--r--  yt_dlp/extractor/dropbox.py | 4
-rw-r--r--  yt_dlp/extractor/dtube.py | 4
-rw-r--r--  yt_dlp/extractor/fathom.py | 54
-rw-r--r--  yt_dlp/extractor/generic.py | 16
-rw-r--r--  yt_dlp/extractor/gofile.py | 13
-rw-r--r--  yt_dlp/extractor/goplay.py | 4
-rw-r--r--  yt_dlp/extractor/imgur.py | 18
-rw-r--r--  yt_dlp/extractor/jiosaavn.py | 179
-rw-r--r--  yt_dlp/extractor/joqrag.py | 12
-rw-r--r--  yt_dlp/extractor/kick.py | 32
-rw-r--r--  yt_dlp/extractor/leeco.py | 4
-rw-r--r--  yt_dlp/extractor/linkedin.py | 4
-rw-r--r--  yt_dlp/extractor/loom.py | 461
-rw-r--r--  yt_dlp/extractor/masters.py | 1
-rw-r--r--  yt_dlp/extractor/medici.py | 182
-rw-r--r--  yt_dlp/extractor/microsoftstream.py | 4
-rw-r--r--  yt_dlp/extractor/mixch.py | 64
-rw-r--r--  yt_dlp/extractor/motherless.py | 4
-rw-r--r--  yt_dlp/extractor/naver.py | 4
-rw-r--r--  yt_dlp/extractor/neteasemusic.py | 8
-rw-r--r--  yt_dlp/extractor/nhk.py | 202
-rw-r--r--  yt_dlp/extractor/niconico.py | 11
-rw-r--r--  yt_dlp/extractor/panopto.py | 10
-rw-r--r--  yt_dlp/extractor/patreon.py | 44
-rw-r--r--  yt_dlp/extractor/polsatgo.py | 4
-rw-r--r--  yt_dlp/extractor/pr0gramm.py | 6
-rw-r--r--  yt_dlp/extractor/prosiebensat1.py | 10
-rw-r--r--  yt_dlp/extractor/radiokapital.py | 14
-rw-r--r--  yt_dlp/extractor/rokfin.py | 4
-rw-r--r--  yt_dlp/extractor/sejmpl.py | 14
-rw-r--r--  yt_dlp/extractor/sharepoint.py | 112
-rw-r--r--  yt_dlp/extractor/sonyliv.py | 64
-rw-r--r--  yt_dlp/extractor/soundcloud.py | 109
-rw-r--r--  yt_dlp/extractor/telewebion.py | 11
-rw-r--r--  yt_dlp/extractor/tenplay.py | 4
-rw-r--r--  yt_dlp/extractor/thisoldhouse.py | 52
-rw-r--r--  yt_dlp/extractor/tiktok.py | 172
-rw-r--r--  yt_dlp/extractor/twitch.py | 10
-rw-r--r--  yt_dlp/extractor/vk.py | 15
-rw-r--r--  yt_dlp/extractor/vrt.py | 62
-rw-r--r--  yt_dlp/extractor/wistia.py | 4
-rw-r--r--  yt_dlp/extractor/xvideos.py | 34
-rw-r--r--  yt_dlp/extractor/youtube.py | 32
-rw-r--r--  yt_dlp/extractor/zattoo.py | 4
-rw-r--r--  yt_dlp/networking/__init__.py | 7
-rw-r--r--  yt_dlp/networking/_curlcffi.py | 221
-rw-r--r--  yt_dlp/networking/_helper.py | 4
-rw-r--r--  yt_dlp/networking/_requests.py | 7
-rw-r--r--  yt_dlp/networking/_urllib.py | 6
-rw-r--r--  yt_dlp/networking/_websockets.py | 38
-rw-r--r--  yt_dlp/networking/common.py | 14
-rw-r--r--  yt_dlp/networking/impersonate.py | 141
-rw-r--r--  yt_dlp/options.py | 20
-rw-r--r--  yt_dlp/update.py | 2
-rw-r--r--  yt_dlp/utils/_utils.py | 129
-rw-r--r--  yt_dlp/utils/traversal.py | 37
-rw-r--r--  yt_dlp/version.py | 6
104 files changed, 4203 insertions, 1579 deletions
diff --git a/.github/banner.svg b/.github/banner.svg
index 35dc93e..ea7f9e3 100644
--- a/.github/banner.svg
+++ b/.github/banner.svg
@@ -1,4 +1,4 @@
-<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" preserveAspectRatio="xMidYMid" width="699.935" height="173.764" viewBox="0 0 717 178">
+<svg xmlns="http://www.w3.org/2000/svg" width="746" height="176" viewBox="0 0 746 176">
<defs>
<style>
.cls-1, .cls-4 {
@@ -24,8 +24,8 @@
}
</style>
</defs>
- <path d="M89.846,166.601 L87.111,166.601 L87.111,172.000 L82.173,172.000 L82.173,153.812 L90.024,153.812 C94.064,153.812 96.773,156.370 96.773,160.242 C96.773,164.158 93.993,166.601 89.846,166.601 ZM88.851,157.755 L87.111,157.755 L87.111,162.764 L88.851,162.764 C90.583,162.764 91.622,161.796 91.622,160.242 C91.622,158.679 90.583,157.755 88.851,157.755 ZM67.898,153.812 L72.835,153.812 L72.835,168.021 L80.189,168.021 L80.189,172.000 L67.898,172.000 L67.898,153.812 ZM56.572,172.000 L49.574,172.000 L49.574,153.812 L56.501,153.812 C62.113,153.812 65.630,157.223 65.630,162.906 C65.630,168.590 62.113,172.000 56.572,172.000 ZM56.252,158.004 L54.511,158.004 L54.511,167.808 L56.394,167.808 C59.094,167.808 60.657,166.707 60.657,162.906 C60.657,159.105 59.094,158.004 56.252,158.004 ZM38.211,162.906 L46.736,162.906 L46.736,166.601 L38.211,166.601 L38.211,162.906 ZM31.253,172.000 L26.387,172.000 L26.387,157.791 L20.916,157.791 L20.916,153.812 L36.724,153.812 L36.724,157.791 L31.253,157.791 L31.253,172.000 ZM12.007,172.000 L7.104,172.000 L7.104,166.281 L0.426,153.812 L5.932,153.812 L9.484,161.201 L9.627,161.201 L13.179,153.812 L18.685,153.812 L12.007,166.281 L12.007,172.000 Z" class="cls-1"/>
- <path d="M714.317,161.947 C714.104,160.988 713.536,159.993 711.689,159.993 C710.019,159.993 708.634,160.846 708.456,162.018 C708.278,163.048 708.918,163.617 710.445,164.007 L712.399,164.505 C714.743,165.109 715.738,166.281 715.418,168.199 C715.028,170.544 712.577,172.284 709.415,172.284 C706.609,172.284 704.904,171.041 704.797,168.732 L706.893,168.235 C707.000,169.691 707.959,170.437 709.664,170.437 C711.617,170.437 713.038,169.478 713.216,168.306 C713.394,167.347 712.861,166.707 711.511,166.387 L709.344,165.855 C706.928,165.251 706.005,164.007 706.325,162.125 C706.715,159.816 709.131,158.182 712.008,158.182 C714.708,158.182 715.951,159.461 716.306,161.414 L714.317,161.947 ZM702.671,165.890 L692.751,165.890 C692.245,169.229 693.648,170.401 696.276,170.401 C697.955,170.401 699.269,169.691 700.042,168.270 L701.960,168.838 C700.974,170.899 698.736,172.284 695.957,172.284 C692.023,172.284 690.069,169.478 690.770,165.286 C691.454,161.095 694.403,158.182 698.088,158.182 C700.939,158.182 703.674,159.922 702.813,165.002 L702.671,165.890 ZM697.768,160.064 C695.477,160.064 693.461,162.143 693.044,164.078 L700.823,164.078 C701.223,161.770 700.051,160.064 697.768,160.064 ZM687.862,172.000 L685.446,172.000 L683.066,166.707 L678.910,172.000 L676.494,172.000 L681.965,165.180 L678.768,158.359 L681.183,158.359 L683.528,163.936 L687.720,158.359 L690.135,158.359 L684.594,165.180 L687.862,172.000 ZM673.886,154.630 C673.886,153.848 674.560,153.209 675.377,153.209 C676.194,153.209 676.869,153.848 676.869,154.630 C676.869,155.411 676.194,156.050 675.377,156.050 C674.560,156.050 673.886,155.411 673.886,154.630 ZM673.513,172.000 L671.417,172.000 L673.690,158.359 L675.786,158.359 L673.513,172.000 ZM670.212,154.914 C668.826,154.914 668.151,155.624 667.903,156.974 L667.672,158.359 L670.745,158.359 L670.460,160.135 L667.379,160.135 L665.416,172.000 L663.320,172.000 L665.301,160.135 L663.107,160.135 L663.391,158.359 L665.603,158.359 L665.914,156.477 C666.269,154.132 668.365,152.960 670.318,152.960 C671.348,152.960 671.952,153.173 672.237,153.315 L671.348,155.127 C671.135,155.056 670.816,154.914 670.212,154.914 ZM649.225,172.000 L649.580,169.904 L649.332,169.904 C648.745,170.650 647.582,172.284 644.962,172.284 C641.543,172.284 639.616,169.549 640.327,165.215 C641.046,160.917 643.879,158.182 647.324,158.182 C649.989,158.182 650.539,159.816 650.877,160.526 L651.054,160.526 L652.173,153.812 L654.269,153.812 L651.250,172.000 L649.225,172.000 ZM647.182,160.064 C644.527,160.064 642.911,162.302 642.440,165.180 C641.952,168.093 642.849,170.401 645.477,170.401 C647.999,170.401 649.811,168.270 650.326,165.180 C650.832,162.125 649.749,160.064 647.182,160.064 ZM635.980,172.000 L633.884,172.000 L635.305,163.475 C635.660,161.343 634.701,160.064 632.747,160.064 C630.723,160.064 629.053,161.414 628.627,163.794 L627.277,172.000 L625.181,172.000 L627.454,158.359 L629.479,158.359 L629.124,160.491 L629.302,160.491 C630.154,159.105 631.611,158.182 633.671,158.182 C636.406,158.182 638.005,159.851 637.436,163.333 L635.980,172.000 ZM621.349,172.000 L619.253,172.000 L619.573,170.153 L619.466,170.153 C618.898,171.041 617.442,172.320 615.062,172.320 C612.468,172.320 610.657,170.792 611.083,168.128 C611.616,165.002 614.458,164.434 617.051,164.114 C619.573,163.794 620.603,163.865 620.781,162.871 L620.781,162.800 C621.065,161.059 620.354,160.029 618.436,160.029 C616.447,160.029 615.097,161.095 614.458,162.089 L612.611,161.379 C614.067,158.892 616.554,158.182 618.614,158.182 C620.354,158.182 623.551,158.679 622.841,163.013 L621.349,172.000 
ZM616.660,165.926 C614.991,166.139 613.428,166.636 613.179,168.235 C612.930,169.691 613.996,170.437 615.665,170.437 C618.152,170.437 619.786,168.767 620.070,167.062 L620.390,165.144 C619.964,165.570 617.548,165.819 616.660,165.926 ZM597.804,159.993 C596.135,159.993 594.749,160.846 594.572,162.018 C594.394,163.048 595.033,163.617 596.561,164.007 L598.515,164.505 C600.859,165.109 601.854,166.281 601.534,168.199 C601.143,170.544 598.692,172.284 595.531,172.284 C592.724,172.284 591.019,171.041 590.913,168.732 L593.009,168.235 C593.115,169.691 594.074,170.437 595.779,170.437 C597.733,170.437 599.154,169.478 599.332,168.306 C599.509,167.347 598.976,166.707 597.627,166.387 L595.460,165.855 C593.044,165.251 592.121,164.007 592.440,162.125 C592.831,159.816 595.247,158.182 598.124,158.182 C600.824,158.182 602.067,159.461 602.422,161.414 L600.433,161.947 C600.220,160.988 599.651,159.993 597.804,159.993 ZM588.786,165.890 L578.866,165.890 C578.360,169.229 579.763,170.401 582.392,170.401 C584.071,170.401 585.385,169.691 586.157,168.270 L588.076,168.838 C587.090,170.899 584.852,172.284 582.072,172.284 C578.138,172.284 576.185,169.478 576.886,165.286 C577.570,161.095 580.518,158.182 584.204,158.182 C587.054,158.182 589.790,159.922 588.928,165.002 L588.786,165.890 ZM583.884,160.064 C581.593,160.064 579.577,162.143 579.160,164.078 L586.939,164.078 C587.339,161.770 586.166,160.064 583.884,160.064 ZM574.722,160.171 C572.733,160.171 571.046,161.530 570.744,163.368 L569.323,172.000 L567.227,172.000 L569.500,158.359 L571.525,158.359 L571.170,160.420 L571.312,160.420 C572.023,159.070 573.586,158.146 575.255,158.146 C576.001,158.146 576.534,158.324 576.889,158.644 L575.894,160.384 C575.646,160.242 575.255,160.171 574.722,160.171 ZM561.299,172.000 L561.690,169.691 L561.548,169.691 C560.695,171.076 559.132,172.178 557.072,172.178 C554.515,172.178 552.952,170.508 553.520,167.027 L554.976,158.359 L557.072,158.359 L555.651,166.885 C555.332,168.874 556.362,170.153 558.102,170.153 C559.665,170.153 561.797,168.981 562.223,166.423 L563.573,158.359 L565.669,158.359 L563.395,172.000 L561.299,172.000 ZM551.534,160.135 L548.594,160.135 L547.271,168.093 C546.987,169.869 547.839,170.153 548.763,170.153 C549.225,170.153 549.509,170.082 549.686,170.046 L549.829,171.929 C549.509,172.036 548.976,172.178 548.195,172.178 C546.418,172.178 544.713,171.041 545.104,168.661 L546.507,160.135 L544.465,160.135 L544.749,158.359 L546.800,158.359 L547.342,155.091 L549.438,155.091 L548.896,158.359 L551.818,158.359 L551.534,160.135 ZM539.780,172.000 L537.684,172.000 L538.004,170.153 L537.897,170.153 C537.329,171.041 535.873,172.320 533.493,172.320 C530.900,172.320 529.088,170.792 529.514,168.128 C530.047,165.002 532.889,164.434 535.482,164.114 C538.004,163.794 539.034,163.865 539.212,162.871 L539.212,162.800 C539.496,161.059 538.786,160.029 536.867,160.029 C534.878,160.029 533.528,161.095 532.889,162.089 L531.042,161.379 C532.498,158.892 534.985,158.182 537.045,158.182 C538.786,158.182 541.983,158.679 541.272,163.013 L539.780,172.000 ZM535.091,165.926 C533.422,166.139 531.859,166.636 531.610,168.235 C531.361,169.691 532.427,170.437 534.097,170.437 C536.583,170.437 538.217,168.767 538.501,167.062 L538.821,165.144 C538.395,165.570 535.979,165.819 535.091,165.926 ZM527.316,165.890 L517.397,165.890 C516.891,169.229 518.294,170.401 520.922,170.401 C522.601,170.401 523.915,169.691 524.688,168.270 L526.606,168.838 C525.620,170.899 523.382,172.284 520.603,172.284 C516.669,172.284 514.715,169.478 515.416,165.286 C516.100,161.095 519.049,158.182 
522.734,158.182 C525.585,158.182 528.320,159.922 527.459,165.002 L527.316,165.890 ZM522.414,160.064 C520.123,160.064 518.107,162.143 517.690,164.078 L525.469,164.078 C525.869,161.770 524.697,160.064 522.414,160.064 ZM514.282,154.914 C512.897,154.914 512.222,155.624 511.973,156.974 L511.742,158.359 L514.815,158.359 L514.531,160.135 L511.449,160.135 L509.487,172.000 L507.391,172.000 L509.371,160.135 L507.178,160.135 L507.462,158.359 L509.673,158.359 L509.984,156.477 C510.339,154.132 512.435,152.960 514.389,152.960 C515.419,152.960 516.023,153.173 516.307,153.315 L515.419,155.127 C515.206,155.056 514.886,154.914 514.282,154.914 ZM493.506,172.000 L496.525,153.812 L498.621,153.812 L495.601,172.000 L493.506,172.000 ZM489.674,172.000 L487.578,172.000 L487.898,170.153 L487.791,170.153 C487.223,171.041 485.766,172.320 483.386,172.320 C480.793,172.320 478.981,170.792 479.408,168.128 C479.941,165.002 482.782,164.434 485.375,164.114 C487.898,163.794 488.928,163.865 489.105,162.871 L489.105,162.800 C489.390,161.059 488.679,160.029 486.761,160.029 C484.772,160.029 483.422,161.095 482.782,162.089 L480.935,161.379 C482.392,158.892 484.878,158.182 486.938,158.182 C488.679,158.182 491.876,158.679 491.166,163.013 L489.674,172.000 ZM484.985,165.926 C483.315,166.139 481.752,166.636 481.504,168.235 C481.255,169.691 482.321,170.437 483.990,170.437 C486.477,170.437 488.111,168.767 488.395,167.062 L488.715,165.144 C488.288,165.570 485.873,165.819 484.985,165.926 ZM475.576,172.000 L473.480,172.000 L474.901,163.475 C475.256,161.343 474.297,160.064 472.343,160.064 C470.319,160.064 468.649,161.414 468.223,163.794 L466.873,172.000 L464.777,172.000 L467.051,158.359 L469.075,158.359 L468.720,160.491 L468.898,160.491 C469.750,159.105 471.207,158.182 473.267,158.182 C476.002,158.182 477.601,159.851 477.032,163.333 L475.576,172.000 ZM455.511,172.284 C451.745,172.284 449.703,169.407 450.395,165.109 C451.070,160.917 453.948,158.182 457.571,158.182 C461.336,158.182 463.388,161.059 462.686,165.393 C462.011,169.549 459.134,172.284 455.511,172.284 ZM457.535,160.064 C454.658,160.064 452.873,162.587 452.420,165.393 C451.994,168.057 452.811,170.401 455.546,170.401 C458.423,170.401 460.208,167.924 460.661,165.109 C461.088,162.444 460.271,160.064 457.535,160.064 ZM446.401,154.630 C446.401,153.848 447.076,153.209 447.893,153.209 C448.710,153.209 449.385,153.848 449.385,154.630 C449.385,155.411 448.710,156.050 447.893,156.050 C447.076,156.050 446.401,155.411 446.401,154.630 ZM446.028,172.000 L443.932,172.000 L446.206,158.359 L448.301,158.359 L446.028,172.000 ZM442.763,160.135 L439.823,160.135 L438.500,168.093 C438.216,169.869 439.069,170.153 439.992,170.153 C440.454,170.153 440.738,170.082 440.916,170.046 L441.058,171.929 C440.738,172.036 440.205,172.178 439.424,172.178 C437.648,172.178 435.943,171.041 436.333,168.661 L437.736,160.135 L435.694,160.135 L435.978,158.359 L438.030,158.359 L438.571,155.091 L440.667,155.091 L440.125,158.359 L443.047,158.359 L442.763,160.135 ZM431.380,154.630 C431.380,153.848 432.055,153.209 432.872,153.209 C433.689,153.209 434.364,153.848 434.364,154.630 C434.364,155.411 433.689,156.050 432.872,156.050 C432.055,156.050 431.380,155.411 431.380,154.630 ZM431.007,172.000 L428.911,172.000 L431.184,158.359 L433.280,158.359 L431.007,172.000 ZM422.770,172.000 L423.126,169.904 L422.877,169.904 C422.291,170.650 421.128,172.284 418.508,172.284 C415.089,172.284 413.162,169.549 413.872,165.215 C414.591,160.917 417.424,158.182 420.870,158.182 C423.534,158.182 424.085,159.816 424.422,160.526 L424.600,160.526 
L425.719,153.812 L427.815,153.812 L424.795,172.000 L422.770,172.000 ZM420.728,160.064 C418.073,160.064 416.456,162.302 415.986,165.180 C415.497,168.093 416.394,170.401 419.023,170.401 C421.545,170.401 423.357,168.270 423.872,165.180 C424.378,162.125 423.294,160.064 420.728,160.064 ZM407.216,172.000 L407.572,169.904 L407.323,169.904 C406.737,170.650 405.573,172.284 402.954,172.284 C399.535,172.284 397.608,169.549 398.318,165.215 C399.037,160.917 401.870,158.182 405.316,158.182 C407.980,158.182 408.531,159.816 408.868,160.526 L409.046,160.526 L410.165,153.812 L412.261,153.812 L409.241,172.000 L407.216,172.000 ZM405.174,160.064 C402.519,160.064 400.902,162.302 400.432,165.180 C399.943,168.093 400.840,170.401 403.469,170.401 C405.991,170.401 407.803,168.270 408.318,165.180 C408.824,162.125 407.740,160.064 405.174,160.064 ZM393.971,172.000 L391.875,172.000 L392.195,170.153 L392.088,170.153 C391.520,171.041 390.063,172.320 387.683,172.320 C385.090,172.320 383.279,170.792 383.705,168.128 C384.238,165.002 387.080,164.434 389.673,164.114 C392.195,163.794 393.225,163.865 393.403,162.871 L393.403,162.800 C393.687,161.059 392.976,160.029 391.058,160.029 C389.069,160.029 387.719,161.095 387.080,162.089 L385.232,161.379 C386.689,158.892 389.175,158.182 391.236,158.182 C392.976,158.182 396.173,158.679 395.463,163.013 L393.971,172.000 ZM389.282,165.926 C387.612,166.139 386.049,166.636 385.801,168.235 C385.552,169.691 386.618,170.437 388.287,170.437 C390.774,170.437 392.408,168.767 392.692,167.062 L393.012,165.144 C392.586,165.570 390.170,165.819 389.282,165.926 ZM372.842,172.000 L370.746,172.000 L372.167,163.475 C372.522,161.308 371.528,160.064 369.574,160.064 C367.513,160.064 365.773,161.414 365.347,163.794 L363.997,172.000 L361.901,172.000 L364.920,153.812 L367.016,153.812 L365.915,160.491 L366.093,160.491 C366.945,159.070 368.330,158.182 370.497,158.182 C373.268,158.182 374.867,159.816 374.298,163.333 L372.842,172.000 ZM360.448,160.135 L357.508,160.135 L356.185,168.093 C355.901,169.869 356.753,170.153 357.677,170.153 C358.139,170.153 358.423,170.082 358.601,170.046 L358.743,171.929 C358.423,172.036 357.890,172.178 357.109,172.178 C355.333,172.178 353.627,171.041 354.018,168.661 L355.421,160.135 L353.379,160.135 L353.663,158.359 L355.714,158.359 L356.256,155.091 L358.352,155.091 L357.810,158.359 L360.732,158.359 L360.448,160.135 ZM349.065,154.630 C349.065,153.848 349.740,153.209 350.557,153.209 C351.374,153.209 352.049,153.848 352.049,154.630 C352.049,155.411 351.374,156.050 350.557,156.050 C349.740,156.050 349.065,155.411 349.065,154.630 ZM348.692,172.000 L346.596,172.000 L348.869,158.359 L350.965,158.359 L348.692,172.000 ZM337.615,172.000 L336.372,161.521 L336.159,161.521 L331.434,172.000 L329.374,172.000 L327.491,158.359 L329.694,158.359 L330.901,168.803 L331.043,168.803 L335.697,158.359 L337.935,158.359 L339.072,168.767 L339.214,168.767 L343.903,158.359 L346.105,158.359 L339.675,172.000 L337.615,172.000 ZM316.983,172.000 L314.319,172.000 L310.296,165.526 L308.600,166.885 L307.747,172.000 L305.651,172.000 L308.671,153.812 L310.767,153.812 L308.999,164.434 L309.239,164.434 L316.237,158.359 L318.830,158.359 L312.090,164.203 L316.983,172.000 ZM303.559,160.171 C301.569,160.171 299.882,161.530 299.580,163.368 L298.159,172.000 L296.063,172.000 L298.337,158.359 L300.362,158.359 L300.006,160.420 L300.149,160.420 C300.859,159.070 302.422,158.146 304.091,158.146 C304.837,158.146 305.370,158.324 305.726,158.644 L304.731,160.384 C304.482,160.242 304.091,160.171 303.559,160.171 ZM286.797,172.284 C283.031,172.284 
280.989,169.407 281.682,165.109 C282.356,160.917 285.234,158.182 288.857,158.182 C292.622,158.182 294.674,161.059 293.972,165.393 C293.297,169.549 290.420,172.284 286.797,172.284 ZM288.822,160.064 C285.944,160.064 284.159,162.587 283.706,165.393 C283.280,168.057 284.097,170.401 286.832,170.401 C289.710,170.401 291.495,167.924 291.948,165.109 C292.374,162.444 291.557,160.064 288.822,160.064 ZM280.512,154.914 C279.126,154.914 278.452,155.624 278.203,156.974 L277.972,158.359 L281.045,158.359 L280.760,160.135 L277.679,160.135 L275.716,172.000 L273.620,172.000 L275.601,160.135 L273.407,160.135 L273.691,158.359 L275.903,158.359 L276.214,156.477 C276.569,154.132 278.665,152.960 280.618,152.960 C281.649,152.960 282.252,153.173 282.537,153.315 L281.649,155.127 C281.435,155.056 281.116,154.914 280.512,154.914 ZM259.735,172.000 L262.755,153.812 L264.851,153.812 L261.831,172.000 L259.735,172.000 ZM253.595,172.000 L253.950,169.904 L253.701,169.904 C253.115,170.650 251.952,172.284 249.332,172.284 C245.913,172.284 243.986,169.549 244.696,165.215 C245.416,160.917 248.249,158.182 251.694,158.182 C254.358,158.182 254.909,159.816 255.246,160.526 L255.424,160.526 L256.543,153.812 L258.639,153.812 L255.619,172.000 L253.595,172.000 ZM251.552,160.064 C248.897,160.064 247.281,162.302 246.810,165.180 C246.321,168.093 247.218,170.401 249.847,170.401 C252.369,170.401 254.181,168.270 254.696,165.180 C255.202,162.125 254.119,160.064 251.552,160.064 ZM233.670,165.180 L233.990,163.226 L241.947,163.226 L241.627,165.180 L233.670,165.180 ZM230.478,165.890 L220.558,165.890 C220.052,169.229 221.455,170.401 224.084,170.401 C225.762,170.401 227.077,169.691 227.849,168.270 L229.768,168.838 C228.782,170.899 226.544,172.284 223.764,172.284 C219.830,172.284 217.876,169.478 218.578,165.286 C219.262,161.095 222.210,158.182 225.896,158.182 C228.746,158.182 231.482,159.922 230.620,165.002 L230.478,165.890 ZM225.576,160.064 C223.285,160.064 221.269,162.143 220.851,164.078 L228.631,164.078 C229.030,161.770 227.858,160.064 225.576,160.064 ZM209.063,172.284 C206.434,172.284 205.830,170.650 205.475,169.904 L205.226,169.904 L204.871,172.000 L202.846,172.000 L205.866,153.812 L207.962,153.812 L206.860,160.526 L207.038,160.526 C207.606,159.816 208.708,158.182 211.372,158.182 C214.817,158.182 216.736,160.917 216.025,165.215 C215.315,169.549 212.473,172.284 209.063,172.284 ZM210.803,160.064 C208.246,160.064 206.541,162.125 206.043,165.180 C205.546,168.270 206.576,170.401 209.098,170.401 C211.727,170.401 213.432,168.093 213.929,165.180 C214.391,162.302 213.468,160.064 210.803,160.064 ZM196.634,172.000 L197.025,169.691 L196.883,169.691 C196.031,171.076 194.468,172.178 192.407,172.178 C189.850,172.178 188.287,170.508 188.855,167.027 L190.311,158.359 L192.407,158.359 L190.986,166.885 C190.667,168.874 191.697,170.153 193.437,170.153 C195.000,170.153 197.132,168.981 197.558,166.423 L198.908,158.359 L201.004,158.359 L198.730,172.000 L196.634,172.000 ZM186.869,160.135 L183.929,160.135 L182.606,168.093 C182.322,169.869 183.174,170.153 184.098,170.153 C184.560,170.153 184.844,170.082 185.022,170.046 L185.164,171.929 C184.844,172.036 184.311,172.178 183.530,172.178 C181.754,172.178 180.048,171.041 180.439,168.661 L181.842,160.135 L179.800,160.135 L180.084,158.359 L182.135,158.359 L182.677,155.091 L184.773,155.091 L184.231,158.359 L187.153,158.359 L186.869,160.135 ZM173.020,172.000 L173.410,169.691 L173.268,169.691 C172.416,171.076 170.853,172.178 168.792,172.178 C166.235,172.178 164.672,170.508 165.240,167.027 L166.697,158.359 L168.792,158.359 
L167.372,166.885 C167.052,168.874 168.082,170.153 169.823,170.153 C171.386,170.153 173.517,168.981 173.943,166.423 L175.293,158.359 L177.389,158.359 L175.115,172.000 L173.020,172.000 ZM155.157,172.284 C151.391,172.284 149.349,169.407 150.041,165.109 C150.716,160.917 153.594,158.182 157.217,158.182 C160.982,158.182 163.034,161.059 162.332,165.393 C161.657,169.549 158.780,172.284 155.157,172.284 ZM157.181,160.064 C154.304,160.064 152.519,162.587 152.066,165.393 C151.640,168.057 152.457,170.401 155.192,170.401 C158.069,170.401 159.854,167.924 160.307,165.109 C160.734,162.444 159.917,160.064 157.181,160.064 ZM136.511,177.293 C135.801,177.293 135.197,177.151 135.019,177.044 L135.836,175.197 C137.293,175.588 138.207,175.366 139.317,173.350 L140.063,172.000 L137.293,158.359 L139.495,158.359 L141.449,169.229 L141.591,169.229 L147.168,158.359 L149.512,158.359 L141.023,174.202 C139.886,176.298 138.429,177.293 136.511,177.293 ZM123.225,166.849 L115.871,166.849 L113.181,172.000 L110.872,172.000 L120.569,153.812 L122.843,153.812 L126.501,172.000 L124.192,172.000 L123.225,166.849 ZM121.315,156.690 L121.173,156.690 L116.893,164.895 L122.860,164.895 L121.315,156.690 Z" class="cls-2"/>
- <path d="M252.245,116.350 L252.245,102.200 L309.303,102.200 L309.303,116.350 L252.245,116.350 ZM208.254,81.088 L245.342,59.291 L208.254,38.180 L216.242,25.227 L260.862,52.844 L260.862,65.739 L216.413,93.355 L208.254,81.088 Z" class="cls-3"/>
- <path d="M508.108,52.635 C507.921,55.093 507.643,57.527 507.274,59.937 L504.214,78.017 C503.658,81.170 502.754,84.324 501.502,87.475 C500.250,90.628 498.464,93.479 496.147,96.028 C493.829,98.579 491.047,100.503 487.802,101.800 C484.556,103.097 481.311,103.747 478.067,103.747 C476.211,103.747 474.357,103.491 472.504,102.982 C470.648,102.474 469.072,101.615 467.775,100.409 C466.475,99.205 465.410,97.767 464.576,96.098 C463.741,94.429 463.092,92.714 462.629,90.952 L455.953,131.146 L436.482,131.146 L453.310,28.922 L472.921,28.922 L471.391,38.240 C472.504,36.665 473.777,35.180 475.216,33.790 C476.652,32.399 478.228,31.240 479.944,30.313 C481.659,29.387 483.467,28.737 485.369,28.365 C487.268,27.996 489.145,27.809 491.001,27.809 C493.411,27.809 495.706,28.226 497.886,29.061 C500.063,29.895 501.871,31.171 503.310,32.886 C504.746,34.602 505.835,36.549 506.578,38.727 C507.319,40.907 507.806,43.156 508.039,45.472 C508.269,47.791 508.293,50.179 508.108,52.635 ZM487.455,48.184 C486.851,46.841 485.877,45.798 484.534,45.055 C483.189,44.314 481.729,43.942 480.153,43.942 C478.762,43.942 477.393,44.151 476.050,44.568 C474.705,44.986 473.499,45.681 472.434,46.655 C471.367,47.628 470.556,48.765 470.000,50.062 C469.444,51.362 469.027,52.659 468.748,53.956 L465.828,72.037 C465.641,73.149 465.480,74.286 465.341,75.444 C465.202,76.605 465.178,77.717 465.271,78.782 C465.363,79.849 465.526,80.916 465.758,81.981 C465.988,83.048 466.384,84.022 466.940,84.902 C467.497,85.784 468.283,86.456 469.305,86.918 C470.324,87.383 471.391,87.614 472.504,87.614 C474.079,87.614 475.633,87.314 477.163,86.710 C478.693,86.108 480.036,85.204 481.196,83.998 C482.354,82.794 483.235,81.425 483.839,79.895 C484.441,78.365 484.882,76.859 485.160,75.375 L488.081,57.294 C488.359,55.719 488.474,54.143 488.428,52.565 C488.381,50.990 488.057,49.530 487.455,48.184 ZM433.422,86.501 L433.422,102.635 L384.744,102.635 L384.744,86.501 L401.433,86.501 L412.977,16.544 L400.460,16.544 L400.460,0.410 L435.230,0.410 L420.905,86.501 L433.422,86.501 ZM354.285,102.635 L355.815,93.177 C354.793,94.846 353.542,96.354 352.060,97.697 C350.575,99.042 348.976,100.179 347.261,101.105 C345.545,102.033 343.736,102.704 341.837,103.121 C339.936,103.539 338.058,103.747 336.204,103.747 C333.792,103.747 331.497,103.330 329.320,102.495 C327.140,101.661 325.332,100.388 323.896,98.671 C322.457,96.956 321.368,94.985 320.627,92.760 C319.884,90.535 319.397,88.264 319.167,85.945 C318.934,83.628 318.911,81.264 319.097,78.852 C319.282,76.442 319.606,74.032 320.071,71.620 L322.992,53.539 C323.548,50.295 324.452,47.096 325.704,43.942 C326.955,40.791 328.764,37.962 331.128,35.459 C333.492,32.955 336.274,31.056 339.473,29.756 C342.672,28.459 345.892,27.809 349.139,27.809 C351.086,27.809 352.964,28.066 354.772,28.574 C356.580,29.085 358.155,29.943 359.500,31.147 C360.843,32.353 361.934,33.790 362.769,35.459 C363.603,37.128 364.205,38.844 364.577,40.605 L371.253,0.410 L390.724,0.410 L373.895,102.635 L354.285,102.635 ZM362.004,52.705 C361.956,51.592 361.795,50.503 361.517,49.436 C361.239,48.371 360.822,47.419 360.265,46.585 C359.709,45.751 358.920,45.103 357.901,44.638 C356.880,44.175 355.815,43.942 354.702,43.942 C353.124,43.942 351.573,44.245 350.043,44.846 C348.513,45.451 347.168,46.355 346.010,47.559 C344.849,48.765 343.969,50.110 343.367,51.592 C342.763,53.076 342.370,54.606 342.185,56.182 L339.125,74.262 C338.847,75.840 338.730,77.415 338.777,78.991 C338.823,80.569 339.147,82.029 339.751,83.372 C340.353,84.717 341.326,85.760 342.672,86.501 C344.015,87.244 345.475,87.614 
347.053,87.614 C348.443,87.614 349.810,87.405 351.156,86.988 C352.499,86.571 353.705,85.875 354.772,84.902 C355.836,83.928 356.671,82.794 357.275,81.494 C357.877,80.197 358.270,78.900 358.457,77.600 L361.517,59.520 C361.702,58.407 361.841,57.272 361.934,56.112 C362.026,54.954 362.049,53.817 362.004,52.705 Z" class="cls-4"/>
+ <path class="cls-1" d="M0.426,154.812L7.1,167.281V173h4.9v-5.719l6.678-12.469H13.179L9.627,162.2H9.485l-3.552-7.389H0.426Zm20.49,3.979h5.471V173h4.866V158.791h5.47v-3.979H20.916v3.979Zm25.82,5.115H38.211V167.6h8.525v-3.695ZM56.572,173c5.541,0,9.058-3.41,9.058-9.094s-3.517-9.094-9.129-9.094H49.574V173h7Zm-2.06-4.192V159h1.741c2.842,0,4.4,1.1,4.4,4.9s-1.563,4.9-4.263,4.9H54.512ZM67.9,173H80.189v-3.979H72.835V154.812H67.9V173Zm14.275,0h4.938v-5.4h2.735c4.147,0,6.927-2.443,6.927-6.359,0-3.872-2.709-6.43-6.749-6.43h-7.85V173Zm4.938-9.236v-5.009h1.741c1.732,0,2.771.924,2.771,2.487a2.452,2.452,0,0,1-2.771,2.522H87.111Z"/>
+ <path class="cls-2" d="M113.281,173l2.691-5.151h7.353L124.293,173H126.6l-3.659-18.188H120.67L110.972,173h2.309Zm3.712-7.1,4.281-8.206h0.142l1.545,8.206h-5.968Zm27.54-6.537h-3.072l0.231-1.385a2.139,2.139,0,0,1,2.309-2.06,2.987,2.987,0,0,1,1.136.213l0.888-1.812a4.324,4.324,0,0,0-1.918-.355,4.271,4.271,0,0,0-4.405,3.517l-0.31,1.882H137.18l-0.284,1.776h2.194L137.109,173h2.1l1.963-11.865h3.081Zm5.788,13.925a6.444,6.444,0,0,0,6-3.445l-1.918-.569a4.022,4.022,0,0,1-3.765,2.131c-2.629,0-4.032-1.172-3.526-4.511h9.92l0.142-.888c0.861-5.08-1.874-6.82-4.725-6.82-3.685,0-6.633,2.913-7.317,7.1C144.433,170.478,146.387,173.284,150.321,173.284Zm-2.913-8.206a5.27,5.27,0,0,1,4.725-4.014c2.282,0,3.454,1.706,3.055,4.014h-7.78Zm15.8,8.242a5.173,5.173,0,0,0,4.405-2.167h0.106L167.4,173h2.1l1.492-8.987c0.71-4.334-2.487-4.831-4.228-4.831a6.658,6.658,0,0,0-6,3.2l1.847,0.71a4.673,4.673,0,0,1,3.979-2.06c1.918,0,2.628,1.03,2.344,2.771v0.071c-0.177.994-1.208,0.923-3.73,1.243-2.593.32-5.435,0.888-5.967,4.014C158.806,171.792,160.618,173.32,163.211,173.32Zm0.6-1.883c-1.67,0-2.735-.746-2.487-2.2,0.249-1.6,1.812-2.1,3.482-2.309,0.888-.107,3.3-0.356,3.729-0.782l-0.319,1.918A4.289,4.289,0,0,1,163.815,171.437Zm17.721-12.078h-2.921l0.541-3.268h-2.1l-0.541,3.268h-2.052l-0.284,1.776h2.043l-1.4,8.526a2.88,2.88,0,0,0,3.091,3.517,5.026,5.026,0,0,0,1.634-.249l-0.142-1.883a4.017,4.017,0,0,1-.924.107c-0.923,0-1.776-.284-1.492-2.06l1.324-7.958h2.939Zm10.405,8.064a4.329,4.329,0,0,1-4.12,3.73c-1.741,0-2.771-1.279-2.451-3.268l1.421-8.526h-2.1l-1.457,8.668c-0.568,3.481,1,5.151,3.553,5.151a5.185,5.185,0,0,0,4.475-2.487h0.143L191.018,173h2.1l2.273-13.641h-2.1Zm5,5.577h2.1l1.421-8.632a3.953,3.953,0,0,1,3.979-3.2,2.457,2.457,0,0,1,1.172.213l1-1.74a2.365,2.365,0,0,0-1.634-.5,4.5,4.5,0,0,0-3.943,2.274h-0.143l0.356-2.061h-2.025Zm14.846,0.284a6.446,6.446,0,0,0,6-3.445l-1.918-.569a4.023,4.023,0,0,1-3.766,2.131c-2.628,0-4.031-1.172-3.525-4.511h9.92l0.142-.888c0.861-5.08-1.874-6.82-4.725-6.82-3.685,0-6.634,2.913-7.317,7.1C205.9,170.478,207.857,173.284,211.791,173.284Zm-2.913-8.206a5.268,5.268,0,0,1,4.724-4.014c2.283,0,3.455,1.706,3.055,4.014h-7.779Zm21.1-.852h-7.957l-0.32,1.954h7.957ZM232.208,173h2.1l1.42-8.632a3.959,3.959,0,0,1,3.979-3.2,6.363,6.363,0,0,1,1.279.142l0.355-2.131a10.544,10.544,0,0,0-1.1-.036,4.538,4.538,0,0,0-3.943,2.274h-0.142l0.355-2.061h-2.025Zm9.339,0h2.1l2.274-13.641h-2.1Zm3.961-15.95a1.422,1.422,0,1,0-1.492-1.42A1.464,1.464,0,0,0,245.508,157.05Zm7.476,16.234a6.244,6.244,0,0,0,6.074-4.263h-2.1a3.9,3.9,0,0,1-3.659,2.38c-2.5,0-3.774-2.06-3.232-5.221,0.488-3.091,2.477-5.116,4.937-5.116a2.628,2.628,0,0,1,2.878,2.38h2.1c0.062-2.557-1.821-4.262-4.689-4.262-3.7,0-6.652,2.913-7.318,7.069C247.291,170.336,249.138,173.284,252.984,173.284Zm11.9-8.49a4.229,4.229,0,0,1,4.228-3.73c1.953,0,2.948,1.244,2.593,3.411L270.279,173h2.1l1.456-8.667c0.568-3.517-1.03-5.151-3.8-5.151a4.741,4.741,0,0,0-4.4,2.309h-0.177l1.1-6.679h-2.1L261.434,173h2.095Zm23.865,8.49a6.244,6.244,0,0,0,6.074-4.263h-2.1a3.894,3.894,0,0,1-3.659,2.38c-2.5,0-3.774-2.06-3.232-5.221,0.488-3.091,2.478-5.116,4.937-5.116a2.628,2.628,0,0,1,2.878,2.38h2.1c0.062-2.557-1.821-4.262-4.689-4.262-3.7,0-6.652,2.913-7.318,7.069C283.051,170.336,284.9,173.284,288.744,173.284Zm14.1,0c3.623,0,6.5-2.735,7.175-6.891,0.7-4.334-1.349-7.211-5.115-7.211-3.623,0-6.5,2.735-7.175,6.927C297.034,170.407,299.076,173.284,302.842,173.284Zm0.035-1.883c-2.735,0-3.552-2.344-3.126-5.008,0.453-2.806,2.238-5.329,5.116-5.329,2.735,0,3.552,2.38,3.126,5.045C307.54,168.924,305.755,171.4,302.877,171.4Zm9.231,1.6h2.1l1.421-8.525
a3.978,3.978,0,0,1,3.659-3.411,2.092,2.092,0,0,1,2.273,2.594L319.994,173h2.132l1.492-8.881a3.45,3.45,0,0,1,3.516-3.055c1.492,0,2.629.782,2.309,2.807L327.916,173h2.1l1.527-9.129c0.533-3.2-.959-4.689-3.374-4.689a5.191,5.191,0,0,0-4.476,2.309h-0.142a3.076,3.076,0,0,0-3.268-2.309,4.487,4.487,0,0,0-4.05,2.309h-0.178l0.356-2.132h-2.025Zm21.734,0h2.1l1.421-8.525a3.978,3.978,0,0,1,3.659-3.411,2.093,2.093,0,0,1,2.274,2.594L341.728,173h2.131l1.492-8.881a3.45,3.45,0,0,1,3.517-3.055c1.491,0,2.628.782,2.309,2.807L349.649,173h2.1l1.527-9.129c0.533-3.2-.959-4.689-3.374-4.689a5.191,5.191,0,0,0-4.476,2.309H345.28a3.075,3.075,0,0,0-3.268-2.309,4.487,4.487,0,0,0-4.05,2.309h-0.177l0.355-2.132h-2.025Zm25.711,0.32a5.173,5.173,0,0,0,4.405-2.167h0.107L363.745,173h2.1l1.492-8.987c0.71-4.334-2.487-4.831-4.228-4.831a6.659,6.659,0,0,0-6,3.2l1.847,0.71a4.674,4.674,0,0,1,3.979-2.06c1.918,0,2.628,1.03,2.344,2.771v0.071c-0.177.994-1.207,0.923-3.73,1.243-2.593.32-5.435,0.888-5.967,4.014C355.148,171.792,356.96,173.32,359.553,173.32Zm0.6-1.883c-1.67,0-2.735-.746-2.487-2.2,0.249-1.6,1.812-2.1,3.482-2.309,0.888-.107,3.3-0.356,3.73-0.782l-0.32,1.918A4.289,4.289,0,0,1,360.157,171.437Zm12.961-6.643a4.154,4.154,0,0,1,4.121-3.73c1.954,0,2.913,1.279,2.558,3.411L378.376,173h2.1l1.457-8.667c0.568-3.482-1.03-5.151-3.765-5.151a4.869,4.869,0,0,0-4.37,2.309h-0.177l0.355-2.132h-2.025L369.673,173h2.095Zm16.336,8.49a5.166,5.166,0,0,0,4.369-2.38h0.249l-0.355,2.1h2.024l3.02-18.188h-2.1l-1.119,6.714h-0.178a3.382,3.382,0,0,0-3.552-2.344c-3.446,0-6.278,2.735-7,7.033C384.108,170.549,386.035,173.284,389.454,173.284Zm0.515-1.883c-2.629,0-3.526-2.308-3.037-5.221,0.47-2.878,2.087-5.116,4.742-5.116,2.567,0,3.65,2.061,3.144,5.116C394.3,169.27,392.491,171.4,389.969,171.4Zm19.16-7.175h-7.957l-0.32,1.954h7.957Zm7.349-9.414h-2.1L411.363,173h2.1ZM417.293,173h2.1l2.273-13.641h-2.1Zm3.961-15.95a1.422,1.422,0,1,0-1.492-1.42A1.464,1.464,0,0,0,421.254,157.05Zm5.415,7.744a4.153,4.153,0,0,1,4.121-3.73c1.953,0,2.912,1.279,2.557,3.411L431.926,173h2.1l1.457-8.667c0.568-3.482-1.031-5.151-3.766-5.151a4.869,4.869,0,0,0-4.369,2.309h-0.178l0.356-2.132H425.5L423.223,173h2.1Zm16.868,8.49a6.446,6.446,0,0,0,6-3.445l-1.918-.569a4.023,4.023,0,0,1-3.766,2.131c-2.629,0-4.032-1.172-3.525-4.511h9.919l0.142-.888c0.862-5.08-1.874-6.82-4.724-6.82-3.686,0-6.634,2.913-7.318,7.1C437.65,170.478,439.6,173.284,443.537,173.284Zm-2.912-8.206a5.268,5.268,0,0,1,4.724-4.014c2.282,0,3.455,1.706,3.055,4.014h-7.779Zm22.834,8.242a5.171,5.171,0,0,0,4.4-2.167h0.107L467.65,173h2.1l1.492-8.987c0.711-4.334-2.487-4.831-4.227-4.831a6.658,6.658,0,0,0-6,3.2l1.847,0.71a4.671,4.671,0,0,1,3.978-2.06c1.918,0,2.629,1.03,2.345,2.771v0.071c-0.178.994-1.208,0.923-3.73,1.243-2.593.32-5.435,0.888-5.968,4.014C459.054,171.792,460.866,173.32,463.459,173.32Zm0.6-1.883c-1.67,0-2.736-.746-2.487-2.2,0.249-1.6,1.812-2.1,3.481-2.309,0.888-.107,3.3-0.356,3.73-0.782l-0.32,1.918A4.289,4.289,0,0,1,464.063,171.437Zm19.035-4.014a4.329,4.329,0,0,1-4.12,3.73c-1.741,0-2.771-1.279-2.452-3.268l1.421-8.526h-2.1l-1.457,8.668c-0.568,3.481,1,5.151,3.552,5.151a5.185,5.185,0,0,0,4.476-2.487h0.142L482.175,173h2.095l2.274-13.641h-2.1Zm10.155,5.861a5.166,5.166,0,0,0,4.369-2.38h0.249l-0.355,2.1h2.024l3.02-18.188h-2.1l-1.119,6.714h-0.178a3.382,3.382,0,0,0-3.552-2.344c-3.446,0-6.278,2.735-7,7.033C487.907,170.549,489.834,173.284,493.253,173.284Zm0.515-1.883c-2.629,0-3.526-2.308-3.037-5.221,0.471-2.878,2.087-5.116,4.742-5.116,2.567,0,3.65,2.061,3.144,5.116C498.1,169.27,496.29,171.4,493.768,171.4Zm9.888,1.6h2.1l2.273-13.641H505.93Zm3.961-15.95a1.422,1.4
22,0,1,0-1.492-1.42A1.464,1.464,0,0,0,507.617,157.05Zm7.618,16.234c3.623,0,6.5-2.735,7.175-6.891,0.7-4.334-1.35-7.211-5.115-7.211-3.623,0-6.5,2.735-7.176,6.927C509.427,170.407,511.469,173.284,515.235,173.284Zm0.035-1.883c-2.735,0-3.552-2.344-3.126-5.008,0.453-2.806,2.238-5.329,5.115-5.329,2.736,0,3.553,2.38,3.126,5.045C519.932,168.924,518.147,171.4,515.27,171.4Zm18.822-17.441h-1.918l-9.449,21.775h1.918Zm12.75,5.4H544.5l-5.577,10.87h-0.142l-1.954-10.87h-2.2L537.393,173h2.132ZM548.188,173h2.1l2.274-13.641h-2.1Zm3.96-15.95a1.422,1.422,0,1,0-1.492-1.42A1.464,1.464,0,0,0,552.148,157.05Zm7.121,16.234a5.167,5.167,0,0,0,4.369-2.38h0.249l-0.356,2.1h2.025l3.02-18.188h-2.1l-1.119,6.714h-0.178a3.382,3.382,0,0,0-3.552-2.344c-3.446,0-6.279,2.735-7,7.033C553.922,170.549,555.85,173.284,559.269,173.284Zm0.515-1.883c-2.629,0-3.526-2.308-3.037-5.221,0.47-2.878,2.086-5.116,4.742-5.116,2.566,0,3.65,2.061,3.144,5.116C564.117,169.27,562.306,171.4,559.784,171.4Zm15.572,1.883a6.446,6.446,0,0,0,6-3.445l-1.918-.569a4.023,4.023,0,0,1-3.766,2.131c-2.628,0-4.032-1.172-3.525-4.511h9.919l0.142-.888c0.862-5.08-1.873-6.82-4.724-6.82-3.686,0-6.634,2.913-7.318,7.1C569.468,170.478,571.421,173.284,575.356,173.284Zm-2.913-8.206a5.268,5.268,0,0,1,4.724-4.014c2.282,0,3.455,1.706,3.055,4.014h-7.779Zm17.472,8.206c3.623,0,6.5-2.735,7.176-6.891,0.7-4.334-1.35-7.211-5.116-7.211-3.623,0-6.5,2.735-7.175,6.927C584.107,170.407,586.15,173.284,589.915,173.284Zm0.036-1.883c-2.736,0-3.553-2.344-3.126-5.008,0.452-2.806,2.237-5.329,5.115-5.329,2.735,0,3.552,2.38,3.126,5.045C594.613,168.924,592.828,171.4,589.951,171.4Zm21.413,1.883a5.166,5.166,0,0,0,4.369-2.38h0.249l-0.356,2.1h2.025l3.02-18.188h-2.1l-1.119,6.714h-0.178a3.382,3.382,0,0,0-3.552-2.344c-3.446,0-6.279,2.735-7,7.033C606.018,170.549,607.945,173.284,611.364,173.284Zm0.515-1.883c-2.629,0-3.526-2.308-3.037-5.221,0.47-2.878,2.087-5.116,4.742-5.116,2.566,0,3.65,2.061,3.144,5.116C616.213,169.27,614.4,171.4,611.879,171.4Zm15.536,1.883c3.623,0,6.5-2.735,7.176-6.891,0.7-4.334-1.35-7.211-5.116-7.211-3.623,0-6.5,2.735-7.175,6.927C621.607,170.407,623.65,173.284,627.415,173.284Zm0.036-1.883c-2.736,0-3.553-2.344-3.126-5.008,0.453-2.806,2.238-5.329,5.115-5.329,2.735,0,3.552,2.38,3.126,5.045C632.113,168.924,630.328,171.4,627.451,171.4Zm11.86,1.6h2.06l4.724-10.479h0.213L647.552,173h2.06l6.43-13.641h-2.2l-4.689,10.408h-0.142l-1.137-10.408h-2.237L640.98,169.8h-0.142l-1.208-10.444h-2.2Zm20.667-8.206a4.154,4.154,0,0,1,4.121-3.73c1.954,0,2.913,1.279,2.557,3.411L665.236,173h2.1l1.457-8.667c0.568-3.482-1.03-5.151-3.766-5.151a4.869,4.869,0,0,0-4.369,2.309h-0.177l0.355-2.132h-2.025L656.533,173h2.1Zm16.3-9.982h-2.1L671.163,173h2.1Zm6.463,18.472c3.624,0,6.5-2.735,7.176-6.891,0.7-4.334-1.35-7.211-5.115-7.211-3.624,0-6.5,2.735-7.176,6.927C676.934,170.407,678.976,173.284,682.741,173.284Zm0.036-1.883c-2.735,0-3.552-2.344-3.126-5.008,0.453-2.806,2.238-5.329,5.115-5.329,2.735,0,3.552,2.38,3.126,5.045C687.439,168.924,685.654,171.4,682.777,171.4Zm13.21,1.919a5.171,5.171,0,0,0,4.4-2.167H700.5L700.178,173h2.1l1.492-8.987c0.71-4.334-2.487-4.831-4.227-4.831a6.658,6.658,0,0,0-6,3.2l1.847,0.71a4.671,4.671,0,0,1,3.978-2.06c1.918,0,2.629,1.03,2.345,2.771v0.071c-0.178.994-1.208,0.923-3.73,1.243-2.593.32-5.435,0.888-5.968,4.014C691.582,171.792,693.393,173.32,695.987,173.32Zm0.6-1.883c-1.669,0-2.735-.746-2.486-2.2,0.249-1.6,1.812-2.1,3.481-2.309,0.888-.107,3.3-0.356,3.73-0.782L701,168.062A4.289,4.289,0,0,1,696.59,171.437Zm14.667,1.847a5.166,5.166,0,0,0,4.369-2.38h0.249L715.52,173h2.024l3.02-18.188h-2.1l-1.119,6.714h-0.178a3.382,3.38
2,0,0,0-3.552-2.344c-3.446,0-6.279,2.735-7,7.033C705.911,170.549,707.838,173.284,711.257,173.284Zm0.515-1.883c-2.629,0-3.526-2.308-3.037-5.221,0.47-2.878,2.087-5.116,4.742-5.116,2.566,0,3.65,2.061,3.144,5.116C716.106,169.27,714.294,171.4,711.772,171.4Zm15.572,1.883a6.446,6.446,0,0,0,6-3.445l-1.918-.569a4.023,4.023,0,0,1-3.766,2.131c-2.628,0-4.031-1.172-3.525-4.511h9.919L734.2,166c0.862-5.08-1.873-6.82-4.724-6.82-3.685,0-6.634,2.913-7.318,7.1C721.456,170.478,723.41,173.284,727.344,173.284Zm-2.913-8.206a5.268,5.268,0,0,1,4.724-4.014c2.283,0,3.455,1.706,3.055,4.014h-7.779ZM736.255,173h2.1l1.421-8.632a3.953,3.953,0,0,1,3.978-3.2,2.462,2.462,0,0,1,1.173.213l0.994-1.74a2.361,2.361,0,0,0-1.634-.5,4.5,4.5,0,0,0-3.943,2.274H740.2l0.355-2.061h-2.024Z"/>
+ <path class="cls-3" d="M223.055,40.6l36.161,20.584L223.055,82.439,231.011,94.4l43.337-26.926V54.9l-43.5-26.926Zm97.968,76.216v-13.8H265.391v13.8h55.632Z"/>
+ <path class="cls-4" d="M344.32,103.5a12.975,12.975,0,0,1-5.424-3.824,17.885,17.885,0,0,1-3.269-5.911,31.164,31.164,0,0,1-1.46-6.815,40.017,40.017,0,0,1-.07-7.093,64.082,64.082,0,0,1,.974-7.232l2.921-18.081a47.854,47.854,0,0,1,2.712-9.6,26.529,26.529,0,0,1,5.424-8.484,23.518,23.518,0,0,1,8.345-5.7,25.528,25.528,0,0,1,9.666-1.947,20.709,20.709,0,0,1,5.633.765,11.788,11.788,0,0,1,4.728,2.573,14.385,14.385,0,0,1,3.269,4.312,21.361,21.361,0,0,1,1.808,5.146L386.253,1.41h19.471L388.9,103.635h-19.61l1.53-9.458a20.581,20.581,0,0,1-3.755,4.52,24.418,24.418,0,0,1-4.8,3.408,20.667,20.667,0,0,1-5.424,2.016,26.173,26.173,0,0,1-5.633.626A19.058,19.058,0,0,1,344.32,103.5Zm21.836-15.507a9.49,9.49,0,0,0,3.616-2.086,10.662,10.662,0,0,0,2.5-3.407,13.948,13.948,0,0,0,1.182-3.894l3.06-18.08q0.277-1.669.417-3.407A28.276,28.276,0,0,0,377,53.7a15.584,15.584,0,0,0-.487-3.268,9.346,9.346,0,0,0-1.252-2.851,5.5,5.5,0,0,0-2.364-1.947,7.659,7.659,0,0,0-3.2-.7,12.622,12.622,0,0,0-4.659.9,11.3,11.3,0,0,0-4.033,2.712,12.64,12.64,0,0,0-2.643,4.033,17.554,17.554,0,0,0-1.182,4.59l-3.06,18.081a23.192,23.192,0,0,0-.348,4.729,11.38,11.38,0,0,0,.974,4.381,6.567,6.567,0,0,0,2.921,3.129,8.9,8.9,0,0,0,4.381,1.113A13.776,13.776,0,0,0,366.156,87.988Zm33.588-.487h16.689l11.544-69.958H415.46V1.41h34.77L435.905,87.5h12.517v16.134H399.744V87.5ZM468.31,29.922h19.611l-1.53,9.319a31.287,31.287,0,0,1,3.825-4.451,21.413,21.413,0,0,1,4.728-3.477,19.049,19.049,0,0,1,5.425-1.947A29.445,29.445,0,0,1,506,28.809a19.093,19.093,0,0,1,6.885,1.252,13.041,13.041,0,0,1,5.424,3.825,18.154,18.154,0,0,1,3.268,5.841,30.317,30.317,0,0,1,1.461,6.746,41.152,41.152,0,0,1,.069,7.163q-0.28,3.687-.834,7.3l-3.06,18.081a48.3,48.3,0,0,1-2.712,9.457,27.978,27.978,0,0,1-5.355,8.553A22.306,22.306,0,0,1,502.8,102.8a26.069,26.069,0,0,1-9.735,1.947,20.92,20.92,0,0,1-5.563-.765,11.135,11.135,0,0,1-4.729-2.573,14.869,14.869,0,0,1-3.2-4.311,26.622,26.622,0,0,1-1.947-5.146l-6.676,40.194H451.482ZM492.163,87.71A11.256,11.256,0,0,0,496.2,85a12.482,12.482,0,0,0,2.643-4.1,24.835,24.835,0,0,0,1.321-4.52l2.921-18.081a23.24,23.24,0,0,0,.347-4.729,11.453,11.453,0,0,0-.973-4.381,6.61,6.61,0,0,0-2.921-3.129,8.932,8.932,0,0,0-4.381-1.113,13.761,13.761,0,0,0-4.1.626,9.46,9.46,0,0,0-3.616,2.086A9.791,9.791,0,0,0,485,51.062a21.249,21.249,0,0,0-1.252,3.894l-2.92,18.081q-0.28,1.669-.487,3.407a16.193,16.193,0,0,0-.07,3.338,25.184,25.184,0,0,0,.487,3.2A8.979,8.979,0,0,0,481.94,85.9a5.3,5.3,0,0,0,2.365,2.017,7.619,7.619,0,0,0,3.2.7A12.589,12.589,0,0,0,492.163,87.71Z"/>
</svg>
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 4bed5af..04536e2 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -107,6 +107,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
+ with:
+ fetch-depth: 0 # Needed for changelog
- uses: actions/setup-python@v5
with:
python-version: "3.10"
@@ -123,16 +125,18 @@ jobs:
sudo apt -y install zip pandoc man sed
cat > ./requirements.txt << EOF
python=3.10.*
+ pyinstaller
brotli-python
EOF
python devscripts/install_deps.py --print \
--exclude brotli --exclude brotlicffi \
- --include secretstorage --include pyinstaller >> ./requirements.txt
+ --include secretstorage >> ./requirements.txt
mamba create -n build --file ./requirements.txt
- name: Prepare
run: |
python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
+ python devscripts/update_changelog.py -vv
python devscripts/make_lazy_extractors.py
- name: Build Unix platform-independent binary
run: |
@@ -247,6 +251,22 @@ jobs:
python3 devscripts/install_deps.py --print --include pyinstaller > requirements.txt
# We need to ignore wheels otherwise we break universal2 builds
python3 -m pip install -U --user --no-binary :all: -r requirements.txt
+ # We need to fuse our own universal2 wheels for curl_cffi
+ python3 -m pip install -U --user delocate
+ mkdir curl_cffi_whls curl_cffi_universal2
+ python3 devscripts/install_deps.py --print -o --include curl_cffi > requirements.txt
+ for platform in "macosx_11_0_arm64" "macosx_11_0_x86_64"; do
+ python3 -m pip download \
+ --only-binary=:all: \
+ --platform "${platform}" \
+ --pre -d curl_cffi_whls \
+ -r requirements.txt
+ done
+ python3 -m delocate.cmd.delocate_fuse curl_cffi_whls/curl_cffi*.whl -w curl_cffi_universal2
+ python3 -m delocate.cmd.delocate_fuse curl_cffi_whls/cffi*.whl -w curl_cffi_universal2
+ cd curl_cffi_universal2
+ for wheel in *cffi*.whl; do mv -n -- "${wheel}" "${wheel/x86_64/universal2}"; done
+ python3 -m pip install -U --user *cffi*.whl
- name: Prepare
run: |
@@ -342,7 +362,7 @@ jobs:
- name: Install Requirements
run: | # Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
python devscripts/install_deps.py -o --include build
- python devscripts/install_deps.py --include py2exe
+ python devscripts/install_deps.py --include py2exe --include curl_cffi
python -m pip install -U "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-5.8.0-py3-none-any.whl"
- name: Prepare
@@ -447,8 +467,9 @@ jobs:
- name: Make SHA2-SUMS files
run: |
cd ./artifact/
- sha256sum * > ../SHA2-256SUMS
- sha512sum * > ../SHA2-512SUMS
+ # make sure SHA sums are also printed to stdout
+ sha256sum * | tee ../SHA2-256SUMS
+ sha512sum * | tee ../SHA2-512SUMS
- name: Make Update spec
run: |
diff --git a/.github/workflows/core.yml b/.github/workflows/core.yml
index ba86306..076f785 100644
--- a/.github/workflows/core.yml
+++ b/.github/workflows/core.yml
@@ -53,7 +53,7 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
- name: Install test requirements
- run: python3 ./devscripts/install_deps.py --include dev
+ run: python3 ./devscripts/install_deps.py --include dev --include curl_cffi
- name: Run tests
continue-on-error: False
run: |
diff --git a/.github/workflows/quick-test.yml b/.github/workflows/quick-test.yml
index 3114e7b..24b3491 100644
--- a/.github/workflows/quick-test.yml
+++ b/.github/workflows/quick-test.yml
@@ -27,6 +27,8 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
+ with:
+ python-version: '3.8'
- name: Install flake8
run: python3 ./devscripts/install_deps.py -o --include dev
- name: Make lazy extractors
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index fd99cec..32268b3 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -189,13 +189,8 @@ jobs:
if: |
!inputs.prerelease && env.target_repo == github.repository
run: |
+ python devscripts/update_changelog.py -vv
make doc
- sed '/### /Q' Changelog.md >> ./CHANGELOG
- echo '### ${{ env.version }}' >> ./CHANGELOG
- python ./devscripts/make_changelog.py -vv -c >> ./CHANGELOG
- echo >> ./CHANGELOG
- grep -Poz '(?s)### \d+\.\d+\.\d+.+' 'Changelog.md' | head -n -1 >> ./CHANGELOG
- cat ./CHANGELOG > Changelog.md
- name: Push to release
id: push_release
@@ -266,6 +261,7 @@ jobs:
pypi_project: ${{ needs.prepare.outputs.pypi_project }}
run: |
python devscripts/update-version.py -c "${{ env.channel }}" -r "${{ env.target_repo }}" -s "${{ env.suffix }}" "${{ env.version }}"
+ python devscripts/update_changelog.py -vv
python devscripts/make_lazy_extractors.py
sed -i -E '0,/(name = ")[^"]+(")/s//\1${{ env.pypi_project }}\2/' pyproject.toml
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index 6ee3baa..8b5d19a 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -600,3 +600,13 @@ xpadev-net
Xpl0itU
YoshichikaAAA
zhijinwuu
+alb
+hruzgar
+kasper93
+leoheitmannruiz
+luiso1979
+nipotan
+Offert4324
+sta1us
+Tomoka1
+trwstin
diff --git a/Changelog.md b/Changelog.md
index 45a9cef..6cf08be 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -4,6 +4,101 @@
# To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
-->
+### 2024.04.09
+
+#### Important changes
+- Security: [[CVE-2024-22423](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-22423)] [Prevent RCE when using `--exec` with `%q` on Windows](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-hjq6-52gw-2g7p)
+ - The shell escape function now properly escapes `%`, `\` and `\n`.
+ - `utils.Popen` has been patched accordingly.
+
+#### Core changes
+- [Add new option `--progress-delta`](https://github.com/yt-dlp/yt-dlp/commit/9590cc6b4768e190183d7d071a6c78170889116a) ([#9082](https://github.com/yt-dlp/yt-dlp/issues/9082)) by [Grub4K](https://github.com/Grub4K)
+- [Add new options `--impersonate` and `--list-impersonate-targets`](https://github.com/yt-dlp/yt-dlp/commit/0b81d4d252bd065ccd352722987ea34fe17f9244) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan)
+- [Add option `--no-break-on-existing`](https://github.com/yt-dlp/yt-dlp/commit/16be117729150b2784f3b17755c886cb0cf73374) ([#9610](https://github.com/yt-dlp/yt-dlp/issues/9610)) by [bashonly](https://github.com/bashonly)
+- [Fix `filesize_approx` calculation](https://github.com/yt-dlp/yt-dlp/commit/86e3b82261e8ebc6c6707c09544c9dfb8907c0fd) ([#9560](https://github.com/yt-dlp/yt-dlp/issues/9560)) by [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
+- [Infer `acodec` for single-codec containers](https://github.com/yt-dlp/yt-dlp/commit/86a972033e05fea80e5fe7f2aff6723dbe2f3952) by [pukkandan](https://github.com/pukkandan)
+- [Prevent RCE when using `--exec` with `%q` (CVE-2024-22423)](https://github.com/yt-dlp/yt-dlp/commit/ff07792676f404ffff6ee61b5638c9dc1a33a37a) by [Grub4K](https://github.com/Grub4K)
+- **cookies**: [Add `--cookies-from-browser` support for Firefox Flatpak](https://github.com/yt-dlp/yt-dlp/commit/2ab2651a4a7be18939e2b4cb21be79fe477c797a) ([#9619](https://github.com/yt-dlp/yt-dlp/issues/9619)) by [un-def](https://github.com/un-def)
+- **utils**
+ - `traverse_obj`
+ - [Allow unbranching using `all` and `any`](https://github.com/yt-dlp/yt-dlp/commit/3699eeb67cad333272b14a42dd3843d93fda1a2e) ([#9571](https://github.com/yt-dlp/yt-dlp/issues/9571)) by [Grub4K](https://github.com/Grub4K)
+ - [Convenience improvements](https://github.com/yt-dlp/yt-dlp/commit/32abfb00bdbd119ca675fdc6d1719331f0a2741a) ([#9577](https://github.com/yt-dlp/yt-dlp/issues/9577)) by [Grub4K](https://github.com/Grub4K)
+
+#### Extractor changes
+- [Add extractor impersonate API](https://github.com/yt-dlp/yt-dlp/commit/50c29352312f5662acf9a64b0012766f5c40af61) ([#9474](https://github.com/yt-dlp/yt-dlp/issues/9474)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan)
+- **afreecatv**
+ - [Overhaul extractor](https://github.com/yt-dlp/yt-dlp/commit/9415f1a5ef88482ebafe3083e8bcb778ac512df7) ([#9566](https://github.com/yt-dlp/yt-dlp/issues/9566)) by [bashonly](https://github.com/bashonly), [Tomoka1](https://github.com/Tomoka1)
+ - live: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/9073ae6458f4c6a832aa832c67174c61852869be) ([#9348](https://github.com/yt-dlp/yt-dlp/issues/9348)) by [hui1601](https://github.com/hui1601)
+- **asobistage**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/0284f1fee202302a78888420f933deae19d9f4e1) ([#8735](https://github.com/yt-dlp/yt-dlp/issues/8735)) by [pzhlkj6612](https://github.com/pzhlkj6612)
+- **box**: [Support URLs without file IDs](https://github.com/yt-dlp/yt-dlp/commit/07f5b2f7570fd9ac85aed17f4c0118f6eac77beb) ([#9504](https://github.com/yt-dlp/yt-dlp/issues/9504)) by [shreyasminocha](https://github.com/shreyasminocha)
+- **cbc.ca**: player: [Support new URL format](https://github.com/yt-dlp/yt-dlp/commit/b49d5ffc53a72d8245ba319ff07bdc5b8c6a4f0c) ([#9561](https://github.com/yt-dlp/yt-dlp/issues/9561)) by [trainman261](https://github.com/trainman261)
+- **crunchyroll**
+ - [Extract `vo_adaptive_hls` formats by default](https://github.com/yt-dlp/yt-dlp/commit/be77923ffe842f667971019460f6005f3cad01eb) ([#9447](https://github.com/yt-dlp/yt-dlp/issues/9447)) by [bashonly](https://github.com/bashonly)
+ - [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/954e57e405f79188450eb30103a9308732cd318f) ([#9615](https://github.com/yt-dlp/yt-dlp/issues/9615)) by [bytedream](https://github.com/bytedream)
+- **dropbox**: [Fix formats extraction](https://github.com/yt-dlp/yt-dlp/commit/a48cc86d6f6b20427553620c2ddb990ede6a4b41) ([#9627](https://github.com/yt-dlp/yt-dlp/issues/9627)) by [bashonly](https://github.com/bashonly)
+- **fathom**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/bc2b8c0596fd6b75af24822c4f0f1da6783d71f7) ([#9495](https://github.com/yt-dlp/yt-dlp/issues/9495)) by [src-tinkerer](https://github.com/src-tinkerer)
+- **gofile**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/0da66980d3193cad3dae0120cddddbfcabddf7a1) ([#9446](https://github.com/yt-dlp/yt-dlp/issues/9446)) by [jazz1611](https://github.com/jazz1611)
+- **imgur**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/86d2f4d24849af0d1f3af7c0e2ac43bf8a058f74) ([#9471](https://github.com/yt-dlp/yt-dlp/issues/9471)) by [trwstin](https://github.com/trwstin)
+- **jiosaavn**
+ - [Extract artists](https://github.com/yt-dlp/yt-dlp/commit/0ae16ceb1846cc4e609b70ce7c5d8e7458efceb2) ([#9612](https://github.com/yt-dlp/yt-dlp/issues/9612)) by [bashonly](https://github.com/bashonly)
+ - [Fix format extensions](https://github.com/yt-dlp/yt-dlp/commit/443e206ec41e64ca2aef61d8ef91640fb69b3113) ([#9609](https://github.com/yt-dlp/yt-dlp/issues/9609)) by [bashonly](https://github.com/bashonly)
+ - [Support playlists](https://github.com/yt-dlp/yt-dlp/commit/2e94602f241f6e41bdc48576c61089435529339b) ([#9622](https://github.com/yt-dlp/yt-dlp/issues/9622)) by [bashonly](https://github.com/bashonly)
+- **joqrag**: [Fix live status detection](https://github.com/yt-dlp/yt-dlp/commit/f2fd449b46c4058222e1744f7a35caa20b2d003d) ([#9624](https://github.com/yt-dlp/yt-dlp/issues/9624)) by [pzhlkj6612](https://github.com/pzhlkj6612)
+- **kick**: [Support browser impersonation](https://github.com/yt-dlp/yt-dlp/commit/c8a61a910096c77ce08dad5e1b2fbda5eb964156) ([#9611](https://github.com/yt-dlp/yt-dlp/issues/9611)) by [bashonly](https://github.com/bashonly)
+- **loom**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/f859ed3ba1e8b129ae6a467592c65687e73fbca1) ([#8686](https://github.com/yt-dlp/yt-dlp/issues/8686)) by [bashonly](https://github.com/bashonly), [hruzgar](https://github.com/hruzgar)
+- **medici**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/4cd9e251b9abada107b10830de997bf4d79ca369) ([#9518](https://github.com/yt-dlp/yt-dlp/issues/9518)) by [Offert4324](https://github.com/Offert4324)
+- **mixch**
+ - [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/4c3b7a0769706f7f0ea24adf1f219d5ae82d2b07) ([#9608](https://github.com/yt-dlp/yt-dlp/issues/9608)) by [bashonly](https://github.com/bashonly), [nipotan](https://github.com/nipotan)
+ - archive: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/c59de48e2bb4c681b03b93b584a05f52609ce4a0) ([#8761](https://github.com/yt-dlp/yt-dlp/issues/8761)) by [pzhlkj6612](https://github.com/pzhlkj6612)
+- **nhk**: [Fix NHK World extractors](https://github.com/yt-dlp/yt-dlp/commit/4af9d5c2f6aa81403ae2a8a5ae3cc824730f0b86) ([#9623](https://github.com/yt-dlp/yt-dlp/issues/9623)) by [bashonly](https://github.com/bashonly)
+- **patreon**: [Do not extract dead embed URLs](https://github.com/yt-dlp/yt-dlp/commit/36b240f9a72af57eb2c9d927ebb7fd1c917ebf18) ([#9613](https://github.com/yt-dlp/yt-dlp/issues/9613)) by [johnvictorfs](https://github.com/johnvictorfs)
+- **radio1be**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/36baaa10e06715ccba06b78885b2042c4844c826) ([#9122](https://github.com/yt-dlp/yt-dlp/issues/9122)) by [HobbyistDev](https://github.com/HobbyistDev)
+- **sharepoint**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/ff349ff94aae0b2b148bd3670f7c91d39c2f1d8e) ([#6531](https://github.com/yt-dlp/yt-dlp/issues/6531)) by [bashonly](https://github.com/bashonly), [C0D3D3V](https://github.com/C0D3D3V)
+- **sonylivseries**: [Fix season extraction](https://github.com/yt-dlp/yt-dlp/commit/f2868b26e917354203f82a370ad2396646edb813) ([#9423](https://github.com/yt-dlp/yt-dlp/issues/9423)) by [bashonly](https://github.com/bashonly)
+- **soundcloud**
+ - [Adjust format sorting](https://github.com/yt-dlp/yt-dlp/commit/a2d0840739cddd585d24e0ce4796394fc8a4fa2e) ([#9584](https://github.com/yt-dlp/yt-dlp/issues/9584)) by [bashonly](https://github.com/bashonly)
+ - [Support cookies](https://github.com/yt-dlp/yt-dlp/commit/97362712a1f2b04e735bdf54f749ad99165a62fe) ([#9586](https://github.com/yt-dlp/yt-dlp/issues/9586)) by [bashonly](https://github.com/bashonly)
+ - [Support retries for API rate-limit](https://github.com/yt-dlp/yt-dlp/commit/246571ae1d867df8bf31a056bdf3bbbfd398366a) ([#9585](https://github.com/yt-dlp/yt-dlp/issues/9585)) by [bashonly](https://github.com/bashonly)
+- **thisoldhouse**: [Support Brightcove embeds](https://github.com/yt-dlp/yt-dlp/commit/0df63cce69026d2f4c0cbb4dd36163e83eac93dc) ([#9576](https://github.com/yt-dlp/yt-dlp/issues/9576)) by [bashonly](https://github.com/bashonly)
+- **tiktok**
+ - [Fix API extraction](https://github.com/yt-dlp/yt-dlp/commit/cb61e20c266facabb7a30f9ce53bd79dfc158475) ([#9548](https://github.com/yt-dlp/yt-dlp/issues/9548)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
+ - [Prefer non-bytevc2 formats](https://github.com/yt-dlp/yt-dlp/commit/63f685f341f35f6f02b0368d1ba53bdb5b520410) ([#9575](https://github.com/yt-dlp/yt-dlp/issues/9575)) by [bashonly](https://github.com/bashonly)
+ - [Restore `carrier_region` API parameter](https://github.com/yt-dlp/yt-dlp/commit/fc53ec13ff1ee926a3e533a68cfca8acc887b661) ([#9637](https://github.com/yt-dlp/yt-dlp/issues/9637)) by [bashonly](https://github.com/bashonly)
+ - [Update API hostname](https://github.com/yt-dlp/yt-dlp/commit/8c05b3ebae23c5b444857549a85b84004c01a536) ([#9444](https://github.com/yt-dlp/yt-dlp/issues/9444)) by [bashonly](https://github.com/bashonly)
+- **twitch**: [Extract AV1 and HEVC formats](https://github.com/yt-dlp/yt-dlp/commit/02f93ff51b3ff9436d60c4993562b366eaae8851) ([#9158](https://github.com/yt-dlp/yt-dlp/issues/9158)) by [kasper93](https://github.com/kasper93)
+- **vkplay**: [Fix `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/b15b0c1d2106437ec61a5c436c543e8760eac160) ([#9636](https://github.com/yt-dlp/yt-dlp/issues/9636)) by [bashonly](https://github.com/bashonly)
+- **xvideos**: [Support new URL format](https://github.com/yt-dlp/yt-dlp/commit/aa7e9ae4f48276bd5d0173966c77db9484f65a0a) ([#9502](https://github.com/yt-dlp/yt-dlp/issues/9502)) by [sta1us](https://github.com/sta1us)
+- **youtube**
+ - [Calculate more accurate `filesize`](https://github.com/yt-dlp/yt-dlp/commit/a25a424323267e3f6f9f63c0b62df499bd7b8d46) by [pukkandan](https://github.com/pukkandan)
+ - [Update `android` params](https://github.com/yt-dlp/yt-dlp/commit/e7b17fce14775bd2448695c8eb7379b8d31d3537) by [pukkandan](https://github.com/pukkandan)
+ - search: [Fix params for uncensored results](https://github.com/yt-dlp/yt-dlp/commit/17d248a58781e2588d18a5ebe00c441d10011fcd) ([#9456](https://github.com/yt-dlp/yt-dlp/issues/9456)) by [alb](https://github.com/alb), [pukkandan](https://github.com/pukkandan)
+
+#### Downloader changes
+- **ffmpeg**: [Accept output args from info dict](https://github.com/yt-dlp/yt-dlp/commit/9c42b7eef547e826e9fcc7beb6706a2523949d05) ([#9278](https://github.com/yt-dlp/yt-dlp/issues/9278)) by [bashonly](https://github.com/bashonly)
+
+#### Networking changes
+- [Respect `SSLKEYLOGFILE` environment variable](https://github.com/yt-dlp/yt-dlp/commit/79a451e5763eda8b10d00684d5d3378f3255ee01) ([#9543](https://github.com/yt-dlp/yt-dlp/issues/9543)) by [luiso1979](https://github.com/luiso1979)
+- **Request Handler**
+ - curlcffi: [Add support for `curl_cffi`](https://github.com/yt-dlp/yt-dlp/commit/52f5be1f1e0dc45bb397ab950f564721976a39bf) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan)
+ - websockets: [Workaround race condition causing issues on PyPy](https://github.com/yt-dlp/yt-dlp/commit/e5d4f11104ce7ea1717a90eea82c0f7d230ea5d5) ([#9514](https://github.com/yt-dlp/yt-dlp/issues/9514)) by [coletdjnz](https://github.com/coletdjnz)
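A minimal sketch of how the new impersonation support might be used from the Python API, assuming the `--impersonate` CLI flag documented later in this patch maps to an `impersonate` key in the `YoutubeDL` options dict; `ImpersonateTarget` is the class the new tests further below import:

```python
# Minimal sketch (assumption: the 'impersonate' params key mirrors the new
# --impersonate CLI flag). ImpersonateTarget comes from the new impersonate module.
from yt_dlp import YoutubeDL
from yt_dlp.networking.impersonate import ImpersonateTarget

ydl_opts = {
    # On the CLI the equivalent would be e.g. --impersonate chrome or chrome:windows-10
    'impersonate': ImpersonateTarget('chrome'),
}
with YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://example.com/some/video'])  # placeholder URL
```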
+
+#### Misc. changes
+- **build**
+ - [Do not include `curl_cffi` in `macos_legacy`](https://github.com/yt-dlp/yt-dlp/commit/b19ae095fdddd43c2a2c67d10fbe0d9a645bb98f) ([#9653](https://github.com/yt-dlp/yt-dlp/issues/9653)) by [bashonly](https://github.com/bashonly)
+ - [Optional dependencies cleanup](https://github.com/yt-dlp/yt-dlp/commit/58dd0f8d1eee6bc9fdc57f1923bed772fa3c946d) ([#9550](https://github.com/yt-dlp/yt-dlp/issues/9550)) by [bashonly](https://github.com/bashonly)
+ - [Print SHA sums to GHA logs](https://github.com/yt-dlp/yt-dlp/commit/e8032503b9517465b0e86d776fc1e60d8795d673) ([#9582](https://github.com/yt-dlp/yt-dlp/issues/9582)) by [bashonly](https://github.com/bashonly)
+ - [Update changelog for tarball and sdist](https://github.com/yt-dlp/yt-dlp/commit/17b96974a334688f76b57d350e07cae8cda46877) ([#9425](https://github.com/yt-dlp/yt-dlp/issues/9425)) by [bashonly](https://github.com/bashonly)
+- **cleanup**
+ - [Standardize `import datetime as dt`](https://github.com/yt-dlp/yt-dlp/commit/c305a25c1b16bcf7a5ec499c3b786ed1e2c748da) ([#8978](https://github.com/yt-dlp/yt-dlp/issues/8978)) by [pukkandan](https://github.com/pukkandan)
+ - ie: [No `from` stdlib imports in extractors](https://github.com/yt-dlp/yt-dlp/commit/e3a3ed8a981d9395c4859b6ef56cd02bc3148db2) by [pukkandan](https://github.com/pukkandan)
+ - Miscellaneous: [216f6a3](https://github.com/yt-dlp/yt-dlp/commit/216f6a3cb57824e6a3c859649ce058c199b1b247) by [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
+- **docs**
+ - [Update yt-dlp tagline](https://github.com/yt-dlp/yt-dlp/commit/388c979ac63a8774339fac2516fe1cc852b4276e) ([#9481](https://github.com/yt-dlp/yt-dlp/issues/9481)) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
+ - [Various manpage fixes](https://github.com/yt-dlp/yt-dlp/commit/df0e138fc02ae2764a44f2f59fc93c756c4d3ee2) by [leoheitmannruiz](https://github.com/leoheitmannruiz)
+- **test**
+ - [Workaround websocket server hanging](https://github.com/yt-dlp/yt-dlp/commit/f849d77ab54788446b995d256e1ee0894c4fb927) ([#9467](https://github.com/yt-dlp/yt-dlp/issues/9467)) by [coletdjnz](https://github.com/coletdjnz)
+ - `traversal`: [Separate traversal tests](https://github.com/yt-dlp/yt-dlp/commit/979ce2e786f2ee3fc783b6dc1ef4188d8805c923) ([#9574](https://github.com/yt-dlp/yt-dlp/issues/9574)) by [Grub4K](https://github.com/Grub4K)
+
### 2024.03.10
#### Core changes
diff --git a/Makefile b/Makefile
index 9344003..cef4bc6 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@ all: lazy-extractors yt-dlp doc pypi-files
clean: clean-test clean-dist
clean-all: clean clean-cache
completions: completion-bash completion-fish completion-zsh
-doc: README.md CONTRIBUTING.md issuetemplates supportedsites
+doc: README.md CONTRIBUTING.md CONTRIBUTORS issuetemplates supportedsites
ot: offlinetest
tar: yt-dlp.tar.gz
@@ -10,9 +10,12 @@ tar: yt-dlp.tar.gz
# intended use: when building a source distribution,
# make pypi-files && python3 -m build -sn .
pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites \
- completions yt-dlp.1 pyproject.toml setup.cfg devscripts/* test/*
+ completions yt-dlp.1 pyproject.toml setup.cfg devscripts/* test/*
-.PHONY: all clean install test tar pypi-files completions ot offlinetest codetest supportedsites
+.PHONY: all clean clean-all clean-test clean-dist clean-cache \
+ completions completion-bash completion-fish completion-zsh \
+ doc issuetemplates supportedsites ot offlinetest codetest test \
+ tar pypi-files lazy-extractors install uninstall
clean-test:
rm -rf test/testdata/sigs/player-*.js tmp/ *.annotations.xml *.aria2 *.description *.dump *.frag \
@@ -156,5 +159,14 @@ yt-dlp.tar.gz: all
Makefile yt-dlp.1 README.txt completions .gitignore \
setup.cfg yt-dlp yt_dlp pyproject.toml devscripts test
-AUTHORS:
- git shortlog -s -n HEAD | cut -f2 | sort > AUTHORS
+AUTHORS: Changelog.md
+ @if [ -d '.git' ] && command -v git > /dev/null ; then \
+ echo 'Generating $@ from git commit history' ; \
+ git shortlog -s -n HEAD | cut -f2 | sort > $@ ; \
+ fi
+
+CONTRIBUTORS: Changelog.md
+ @if [ -d '.git' ] && command -v git > /dev/null ; then \
+ echo 'Updating $@ from git commit history' ; \
+ $(PYTHON) devscripts/make_changelog.py -v -c > /dev/null ; \
+ fi
diff --git a/README.md b/README.md
index 1e108a2..458541d 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@
</div>
<!-- MANPAGE: END EXCLUDED SECTION -->
-yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on the now inactive [youtube-dlc](https://github.com/blackjack4494/yt-dlc). The main focus of this project is adding new features and patches while also keeping up to date with the original project
+yt-dlp is a feature-rich command-line audio/video downloader with support for [thousands of sites](supportedsites.md). The project is a fork of [youtube-dl](https://github.com/ytdl-org/youtube-dl) based on the now inactive [youtube-dlc](https://github.com/blackjack4494/yt-dlc).
<!-- MANPAGE: MOVE "USAGE AND OPTIONS" SECTION HERE -->
@@ -158,6 +158,7 @@ When using `--update`/`-U`, a release binary will only update to its current cha
You may also use `--update-to <repository>` (`<owner>/<repository>`) to update to a channel on a completely different repository. Be careful with what repository you are updating to though, there is no verification done for binaries from different repositories.
Example usage:
+
* `yt-dlp --update-to master` switch to the `master` channel and update to its latest release
* `yt-dlp --update-to stable@2023.07.06` upgrade/downgrade to release to `stable` channel tag `2023.07.06`
* `yt-dlp --update-to 2023.10.07` upgrade/downgrade to tag `2023.10.07` if it exists on the current channel
@@ -196,6 +197,15 @@ While all the other dependencies are optional, `ffmpeg` and `ffprobe` are highly
* [**websockets**](https://github.com/aaugustin/websockets)\* - For downloading over websocket. Licensed under [BSD-3-Clause](https://github.com/aaugustin/websockets/blob/main/LICENSE)
* [**requests**](https://github.com/psf/requests)\* - HTTP library. For HTTPS proxy and persistent connections support. Licensed under [Apache-2.0](https://github.com/psf/requests/blob/main/LICENSE)
+#### Impersonation
+
+The following provide support for impersonating browser requests. This may be required for some sites that employ TLS fingerprinting.
+
+* [**curl_cffi**](https://github.com/yifeikong/curl_cffi) (recommended) - Python binding for [curl-impersonate](https://github.com/lwthiker/curl-impersonate). Provides impersonation targets for Chrome, Edge and Safari. Licensed under [MIT](https://github.com/yifeikong/curl_cffi/blob/main/LICENSE)
+ * Can be installed with the `curl_cffi` group, e.g. `pip install yt-dlp[default,curl_cffi]`
+ * Currently only included in `yt-dlp.exe` and `yt-dlp_macos` builds
+
+
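A hedged sketch of probing for the optional dependency before relying on impersonation; it assumes `yt_dlp.dependencies` exposes `curl_cffi` as `None` when the package is missing, matching how the new tests import it:

```python
# Sketch only: detect whether impersonation support is available in this install.
# Assumes yt_dlp.dependencies sets curl_cffi to None when the optional package
# is not installed (the new tests import it from the same module).
from yt_dlp.dependencies import curl_cffi

if curl_cffi is None:
    print('curl_cffi is not installed; --impersonate targets will be unavailable')
else:
    print('curl_cffi available:', getattr(curl_cffi, '__version__', 'unknown'))
```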
### Metadata
* [**mutagen**](https://github.com/quodlibet/mutagen)\* - For `--embed-thumbnail` in certain formats. Licensed under [GPLv2+](https://github.com/quodlibet/mutagen/blob/master/COPYING)
@@ -389,6 +399,10 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
direct connection
--socket-timeout SECONDS Time to wait before giving up, in seconds
--source-address IP Client-side IP address to bind to
+ --impersonate CLIENT[:OS] Client to impersonate for requests. E.g.
+ chrome, chrome-110, chrome:windows-10. Pass
+ --impersonate="" to impersonate any client.
+ --list-impersonate-targets List available clients to impersonate.
-4, --force-ipv4 Make all connections via IPv4
-6, --force-ipv6 Make all connections via IPv6
--enable-file-urls Enable file:// URLs. This is disabled by
@@ -468,6 +482,9 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
--max-downloads NUMBER Abort after downloading NUMBER files
--break-on-existing Stop the download process when encountering
a file that is in the archive
+ --no-break-on-existing Do not stop the download process when
+ encountering a file that is in the archive
+ (default)
--break-per-input Alters --max-downloads, --break-on-existing,
--break-match-filter, and autonumber to
reset per input URL
@@ -741,6 +758,7 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
accessible under "progress" key. E.g.
--console-title --progress-template
"download-title:%(info.id)s-%(progress.eta)s"
+ --progress-delta SECONDS Time between progress output (default: 0)
-v, --verbose Print various debugging information
--dump-pages Print downloaded pages encoded using base64
to debug problems (very verbose)
@@ -1459,9 +1477,9 @@ The following numeric meta fields can be used with comparisons `<`, `<=`, `>`, `
- `width`: Width of the video, if known
- `height`: Height of the video, if known
- `aspect_ratio`: Aspect ratio of the video, if known
- - `tbr`: Average bitrate of audio and video in KBit/s
- - `abr`: Average audio bitrate in KBit/s
- - `vbr`: Average video bitrate in KBit/s
+ - `tbr`: Average bitrate of audio and video in [kbps](## "1000 bits/sec")
+ - `abr`: Average audio bitrate in [kbps](## "1000 bits/sec")
+ - `vbr`: Average video bitrate in [kbps](## "1000 bits/sec")
- `asr`: Audio sampling rate in Hertz
- `fps`: Frame rate
- `audio_channels`: The number of audio channels
@@ -1486,7 +1504,7 @@ Any string comparison may be prefixed with negation `!` in order to produce an o
**Note**: None of the aforementioned meta fields are guaranteed to be present since this solely depends on the metadata obtained by particular extractor, i.e. the metadata offered by the website. Any other field made available by the extractor can also be used for filtering.
-Formats for which the value is not known are excluded unless you put a question mark (`?`) after the operator. You can combine format filters, so `-f "bv[height<=?720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. You can also use the filters with `all` to download all formats that satisfy the filter, e.g. `-f "all[vcodec=none]"` selects all audio-only formats.
+Formats for which the value is not known are excluded unless you put a question mark (`?`) after the operator. You can combine format filters, so `-f "bv[height<=?720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 kbps. You can also use the filters with `all` to download all formats that satisfy the filter, e.g. `-f "all[vcodec=none]"` selects all audio-only formats.
Format selectors can also be grouped using parentheses; e.g. `-f "(mp4,webm)[height<480]"` will download the best pre-merged mp4 and webm formats with a height lower than 480.
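The same filter works from the Python API; a minimal sketch with a placeholder URL:

```python
# Minimal sketch: apply the format filter described above via the Python API.
# 'bv[height<=?720][tbr>500]' selects up-to-720p video (or unknown height)
# with a total bitrate above 500 kbps.
from yt_dlp import YoutubeDL

with YoutubeDL({'format': 'bv[height<=?720][tbr>500]'}) as ydl:
    ydl.download(['https://example.com/watch?v=placeholder'])  # placeholder URL
```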
@@ -1518,10 +1536,10 @@ The available fields are:
- `fps`: Framerate of video
- `hdr`: The dynamic range of the video (`DV` > `HDR12` > `HDR10+` > `HDR10` > `HLG` > `SDR`)
- `channels`: The number of audio channels
- - `tbr`: Total average bitrate in KBit/s
- - `vbr`: Average video bitrate in KBit/s
- - `abr`: Average audio bitrate in KBit/s
- - `br`: Average bitrate in KBit/s, `tbr`/`vbr`/`abr`
+ - `tbr`: Total average bitrate in [kbps](## "1000 bits/sec")
+ - `vbr`: Average video bitrate in [kbps](## "1000 bits/sec")
+ - `abr`: Average audio bitrate in [kbps](## "1000 bits/sec")
+ - `br`: Average bitrate in [kbps](## "1000 bits/sec"), `tbr`/`vbr`/`abr`
- `asr`: Audio sample rate in Hz
**Deprecation warning**: Many of these fields have (currently undocumented) aliases, that may be removed in a future version. It is recommended to use only the documented field names.
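These fields map onto the `format_sort` option in the Python API (the same key the updated `test_YoutubeDL.py` cases use); a brief sketch with a placeholder URL:

```python
# Sketch: prefer higher resolution, then higher total bitrate, when sorting formats.
from yt_dlp import YoutubeDL

ydl_opts = {'format_sort': ['res', 'tbr']}
with YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info('https://example.com/watch?v=placeholder', download=False)
```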
@@ -1768,8 +1786,7 @@ The following extractors use this feature:
* `version`: The video version to extract - `uncut` or `simulcast`
#### crunchyrollbeta (Crunchyroll)
-* `format`: Which stream type(s) to extract (default: `adaptive_hls`). Potentially useful values include `adaptive_hls`, `adaptive_dash`, `vo_adaptive_hls`, `vo_adaptive_dash`, `download_hls`, `download_dash`, `multitrack_adaptive_hls_v2`
-* `hardsub`: Preference order for which hardsub versions to extract, or `all` (default: `None` = no hardsubs), e.g. `crunchyrollbeta:hardsub=en-US,None`
+* `hardsub`: One or more hardsub versions to extract (in order of preference), or `all` (default: `None` = no hardsubs will be extracted), e.g. `crunchyrollbeta:hardsub=en-US,de-DE`
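A hedged sketch of passing this extractor argument programmatically, assuming the usual `extractor_args` mapping of extractor key to lists of values:

```python
# Sketch: request en-US and de-DE hardsubs, equivalent to
# --extractor-args "crunchyrollbeta:hardsub=en-US,de-DE" on the CLI.
# Assumes the standard {'extractor': {'arg': [values]}} shape of extractor_args.
from yt_dlp import YoutubeDL

ydl_opts = {
    'extractor_args': {'crunchyrollbeta': {'hardsub': ['en-US', 'de-DE']}},
}
with YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.crunchyroll.com/watch/PLACEHOLDER'])  # placeholder URL
```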
#### vikichannel
* `video_types`: Types of videos to download - one or more of `episodes`, `movies`, `clips`, `trailers`
@@ -1792,9 +1809,12 @@ The following extractors use this feature:
* `max_comments`: Maximum number of comments to extract - default is `120`
#### tiktok
-* `api_hostname`: Hostname to use for mobile API requests, e.g. `api-h2.tiktokv.com`
-* `app_version`: App version to call mobile APIs with - should be set along with `manifest_app_version`, e.g. `20.2.1`
-* `manifest_app_version`: Numeric app version to call mobile APIs with, e.g. `221`
+* `api_hostname`: Hostname to use for mobile API calls, e.g. `api22-normal-c-alisg.tiktokv.com`
+* `app_name`: Default app name to use with mobile API calls, e.g. `trill`
+* `app_version`: Default app version to use with mobile API calls - should be set along with `manifest_app_version`, e.g. `34.1.2`
+* `manifest_app_version`: Default numeric app version to use with mobile API calls, e.g. `2023401020`
+* `aid`: Default app ID to use with API calls, e.g. `1180`
+* `app_info`: One or more app info strings in the format of `<iid>/[app_name]/[app_version]/[manifest_app_version]/[aid]`, where `iid` is the unique app install ID. `iid` is the only required value; all other values and their `/` separators can be omitted, e.g. `tiktok:app_info=1234567890123456789` or `tiktok:app_info=123,456/trill///1180,789//34.0.1/340001`
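The same `app_info` string can presumably be supplied through `extractor_args`; a sketch reusing the placeholder install ID from the example above:

```python
# Sketch: pass a TikTok app_info string programmatically. The install ID below
# is the placeholder value from the documentation example, not a real iid.
from yt_dlp import YoutubeDL

ydl_opts = {
    'extractor_args': {'tiktok': {'app_info': ['1234567890123456789']}},
}
with YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.tiktok.com/@user/video/0000000000000000000'])  # placeholder URL
```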
#### rokfinchannel
* `tab`: Which tab to download - one of `new`, `top`, `videos`, `podcasts`, `streams`, `stacks`
@@ -1874,6 +1894,7 @@ Plugins can be installed using various methods and locations.
`.zip`, `.egg` and `.whl` archives containing a `yt_dlp_plugins` namespace folder in their root are also supported as plugin packages.
+
* e.g. `${XDG_CONFIG_HOME}/yt-dlp/plugins/mypluginpkg.zip` where `mypluginpkg.zip` contains `yt_dlp_plugins/<type>/myplugin.py`
Run yt-dlp with `--verbose` to check if the plugin has been loaded.
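A minimal sketch of building a plugin package zip with the expected layout; the package and plugin names are illustrative only:

```python
# Sketch: create a plugin package zip with the layout described above
# (yt_dlp_plugins/<type>/<plugin>.py); names here are illustrative only.
import zipfile

with zipfile.ZipFile('mypluginpkg.zip', 'w') as zf:
    zf.writestr('yt_dlp_plugins/extractor/myplugin.py', '# plugin code goes here\n')
```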
diff --git a/bundle/py2exe.py b/bundle/py2exe.py
index ccb52ea..2811674 100755
--- a/bundle/py2exe.py
+++ b/bundle/py2exe.py
@@ -28,7 +28,7 @@ def main():
}],
version_info={
'version': VERSION,
- 'description': 'A youtube-dl fork with additional features and patches',
+ 'description': 'A feature-rich command-line audio/video downloader',
'comments': 'Official repository: <https://github.com/yt-dlp/yt-dlp>',
'product_name': 'yt-dlp',
'product_version': VERSION,
diff --git a/devscripts/changelog_override.json b/devscripts/changelog_override.json
index 2a34ad0..046060c 100644
--- a/devscripts/changelog_override.json
+++ b/devscripts/changelog_override.json
@@ -126,5 +126,26 @@
"when": "4ce57d3b873c2887814cbec03d029533e82f7db5",
"short": "[ie] Support multi-period MPD streams (#6654)",
"authors": ["alard", "pukkandan"]
+ },
+ {
+ "action": "change",
+ "when": "aa7e9ae4f48276bd5d0173966c77db9484f65a0a",
+ "short": "[ie/xvideos] Support new URL format (#9502)",
+ "authors": ["sta1us"]
+ },
+ {
+ "action": "remove",
+ "when": "22e4dfacb61f62dfbb3eb41b31c7b69ba1059b80"
+ },
+ {
+ "action": "change",
+ "when": "e3a3ed8a981d9395c4859b6ef56cd02bc3148db2",
+ "short": "[cleanup:ie] No `from` stdlib imports in extractors",
+ "authors": ["pukkandan"]
+ },
+ {
+ "action": "add",
+ "when": "9590cc6b4768e190183d7d071a6c78170889116a",
+ "short": "[priority] Security: [[CVE-2024-22423](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-22423)] [Prevent RCE when using `--exec` with `%q` on Windows](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-hjq6-52gw-2g7p)\n - The shell escape function now properly escapes `%`, `\\` and `\\n`.\n - `utils.Popen` has been patched accordingly."
}
]
diff --git a/devscripts/install_deps.py b/devscripts/install_deps.py
index 889d9ab..d33fc63 100755
--- a/devscripts/install_deps.py
+++ b/devscripts/install_deps.py
@@ -10,6 +10,8 @@ import argparse
import re
import subprocess
+from pathlib import Path
+
from devscripts.tomlparse import parse_toml
from devscripts.utils import read_file
@@ -17,17 +19,23 @@ from devscripts.utils import read_file
def parse_args():
parser = argparse.ArgumentParser(description='Install dependencies for yt-dlp')
parser.add_argument(
- 'input', nargs='?', metavar='TOMLFILE', default='pyproject.toml', help='Input file (default: %(default)s)')
+ 'input', nargs='?', metavar='TOMLFILE', default=Path(__file__).parent.parent / 'pyproject.toml',
+ help='input file (default: %(default)s)')
parser.add_argument(
- '-e', '--exclude', metavar='DEPENDENCY', action='append', help='Exclude a dependency')
+ '-e', '--exclude', metavar='DEPENDENCY', action='append',
+ help='exclude a dependency')
parser.add_argument(
- '-i', '--include', metavar='GROUP', action='append', help='Include an optional dependency group')
+ '-i', '--include', metavar='GROUP', action='append',
+ help='include an optional dependency group')
parser.add_argument(
- '-o', '--only-optional', action='store_true', help='Only install optional dependencies')
+ '-o', '--only-optional', action='store_true',
+ help='only install optional dependencies')
parser.add_argument(
- '-p', '--print', action='store_true', help='Only print a requirements.txt to stdout')
+ '-p', '--print', action='store_true',
+ help='only print requirements to stdout')
parser.add_argument(
- '-u', '--user', action='store_true', help='Install with pip as --user')
+ '-u', '--user', action='store_true',
+ help='install with pip as --user')
return parser.parse_args()
@@ -37,24 +45,16 @@ def main():
optional_groups = project_table['optional-dependencies']
excludes = args.exclude or []
- deps = []
+ targets = []
if not args.only_optional: # `-o` should exclude 'dependencies' and the 'default' group
- deps.extend(project_table['dependencies'])
+ targets.extend(project_table['dependencies'])
if 'default' not in excludes: # `--exclude default` should exclude entire 'default' group
- deps.extend(optional_groups['default'])
-
- def name(dependency):
- return re.match(r'[\w-]+', dependency)[0].lower()
-
- target_map = {name(dep): dep for dep in deps}
+ targets.extend(optional_groups['default'])
for include in filter(None, map(optional_groups.get, args.include or [])):
- target_map.update(zip(map(name, include), include))
-
- for exclude in map(name, excludes):
- target_map.pop(exclude, None)
+ targets.extend(include)
- targets = list(target_map.values())
+ targets = [t for t in targets if re.match(r'[\w-]+', t).group(0).lower() not in excludes]
if args.print:
for target in targets:
diff --git a/devscripts/make_changelog.py b/devscripts/make_changelog.py
index faab5fa..8e199e7 100644
--- a/devscripts/make_changelog.py
+++ b/devscripts/make_changelog.py
@@ -445,7 +445,32 @@ def get_new_contributors(contributors_path, commits):
return sorted(new_contributors, key=str.casefold)
-if __name__ == '__main__':
+def create_changelog(args):
+ logging.basicConfig(
+ datefmt='%Y-%m-%d %H-%M-%S', format='{asctime} | {levelname:<8} | {message}',
+ level=logging.WARNING - 10 * args.verbosity, style='{', stream=sys.stderr)
+
+ commits = CommitRange(None, args.commitish, args.default_author)
+
+ if not args.no_override:
+ if args.override_path.exists():
+ overrides = json.loads(read_file(args.override_path))
+ commits.apply_overrides(overrides)
+ else:
+ logger.warning(f'File {args.override_path.as_posix()} does not exist')
+
+ logger.info(f'Loaded {len(commits)} commits')
+
+ new_contributors = get_new_contributors(args.contributors_path, commits)
+ if new_contributors:
+ if args.contributors:
+ write_file(args.contributors_path, '\n'.join(new_contributors) + '\n', mode='a')
+ logger.info(f'New contributors: {", ".join(new_contributors)}')
+
+ return Changelog(commits.groups(), args.repo, args.collapsible)
+
+
+def create_parser():
import argparse
parser = argparse.ArgumentParser(
@@ -477,27 +502,9 @@ if __name__ == '__main__':
parser.add_argument(
'--collapsible', action='store_true',
help='make changelog collapsible (default: %(default)s)')
- args = parser.parse_args()
-
- logging.basicConfig(
- datefmt='%Y-%m-%d %H-%M-%S', format='{asctime} | {levelname:<8} | {message}',
- level=logging.WARNING - 10 * args.verbosity, style='{', stream=sys.stderr)
-
- commits = CommitRange(None, args.commitish, args.default_author)
-
- if not args.no_override:
- if args.override_path.exists():
- overrides = json.loads(read_file(args.override_path))
- commits.apply_overrides(overrides)
- else:
- logger.warning(f'File {args.override_path.as_posix()} does not exist')
- logger.info(f'Loaded {len(commits)} commits')
+ return parser
- new_contributors = get_new_contributors(args.contributors_path, commits)
- if new_contributors:
- if args.contributors:
- write_file(args.contributors_path, '\n'.join(new_contributors) + '\n', mode='a')
- logger.info(f'New contributors: {", ".join(new_contributors)}')
- print(Changelog(commits.groups(), args.repo, args.collapsible))
+if __name__ == '__main__':
+ print(create_changelog(create_parser().parse_args()))
diff --git a/devscripts/prepare_manpage.py b/devscripts/prepare_manpage.py
index 9b12e71..47188e9 100644
--- a/devscripts/prepare_manpage.py
+++ b/devscripts/prepare_manpage.py
@@ -24,7 +24,7 @@ PREFIX = r'''%yt-dlp(1)
# NAME
-yt\-dlp \- A youtube-dl fork with additional features and patches
+yt\-dlp \- A feature\-rich command\-line audio/video downloader
# SYNOPSIS
@@ -43,6 +43,27 @@ def filter_excluded_sections(readme):
'', readme)
+def _convert_code_blocks(readme):
+ current_code_block = None
+
+ for line in readme.splitlines(True):
+ if current_code_block:
+ if line == current_code_block:
+ current_code_block = None
+ yield '\n'
+ else:
+ yield f' {line}'
+ elif line.startswith('```'):
+ current_code_block = line.count('`') * '`' + '\n'
+ yield '\n'
+ else:
+ yield line
+
+
+def convert_code_blocks(readme):
+ return ''.join(_convert_code_blocks(readme))
+
+
def move_sections(readme):
MOVE_TAG_TEMPLATE = '<!-- MANPAGE: MOVE "%s" SECTION HERE -->'
sections = re.findall(r'(?m)^%s$' % (
@@ -65,8 +86,10 @@ def move_sections(readme):
def filter_options(readme):
section = re.search(r'(?sm)^# USAGE AND OPTIONS\n.+?(?=^# )', readme).group(0)
+ section_new = section.replace('*', R'\*')
+
options = '# OPTIONS\n'
- for line in section.split('\n')[1:]:
+ for line in section_new.split('\n')[1:]:
mobj = re.fullmatch(r'''(?x)
\s{4}(?P<opt>-(?:,\s|[^\s])+)
(?:\s(?P<meta>(?:[^\s]|\s(?!\s))+))?
@@ -86,7 +109,7 @@ def filter_options(readme):
return readme.replace(section, options, 1)
-TRANSFORM = compose_functions(filter_excluded_sections, move_sections, filter_options)
+TRANSFORM = compose_functions(filter_excluded_sections, convert_code_blocks, move_sections, filter_options)
def main():
diff --git a/devscripts/tomlparse.py b/devscripts/tomlparse.py
index 85ac4ee..ac9ea31 100755
--- a/devscripts/tomlparse.py
+++ b/devscripts/tomlparse.py
@@ -11,7 +11,7 @@ IMPORTANT: INVALID FILES OR MULTILINE STRINGS ARE NOT SUPPORTED!
from __future__ import annotations
-import datetime
+import datetime as dt
import json
import re
@@ -115,9 +115,9 @@ def parse_value(data: str, index: int):
for func in [
int,
float,
- datetime.time.fromisoformat,
- datetime.date.fromisoformat,
- datetime.datetime.fromisoformat,
+ dt.time.fromisoformat,
+ dt.date.fromisoformat,
+ dt.datetime.fromisoformat,
{'true': True, 'false': False}.get,
]:
try:
@@ -179,7 +179,7 @@ def main():
data = file.read()
def default(obj):
- if isinstance(obj, (datetime.date, datetime.time, datetime.datetime)):
+ if isinstance(obj, (dt.date, dt.time, dt.datetime)):
return obj.isoformat()
print(json.dumps(parse_toml(data), default=default))
diff --git a/devscripts/update-version.py b/devscripts/update-version.py
index da54a6a..07a0717 100644
--- a/devscripts/update-version.py
+++ b/devscripts/update-version.py
@@ -9,15 +9,15 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import argparse
import contextlib
+import datetime as dt
import sys
-from datetime import datetime, timezone
from devscripts.utils import read_version, run_process, write_file
def get_new_version(version, revision):
if not version:
- version = datetime.now(timezone.utc).strftime('%Y.%m.%d')
+ version = dt.datetime.now(dt.timezone.utc).strftime('%Y.%m.%d')
if revision:
assert revision.isdecimal(), 'Revision must be a number'
diff --git a/devscripts/update_changelog.py b/devscripts/update_changelog.py
new file mode 100755
index 0000000..36b9a8e
--- /dev/null
+++ b/devscripts/update_changelog.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+
+# Allow direct execution
+import os
+import sys
+
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from pathlib import Path
+
+from devscripts.make_changelog import create_changelog, create_parser
+from devscripts.utils import read_file, read_version, write_file
+
+# Always run after devscripts/update-version.py, and run before `make doc|pypi-files|tar|all`
+
+if __name__ == '__main__':
+ parser = create_parser()
+ parser.description = 'Update an existing changelog file with an entry for a new release'
+ parser.add_argument(
+ '--changelog-path', type=Path, default=Path(__file__).parent.parent / 'Changelog.md',
+ help='path to the Changelog file')
+ args = parser.parse_args()
+ new_entry = create_changelog(args)
+
+ header, sep, changelog = read_file(args.changelog_path).partition('\n### ')
+ write_file(args.changelog_path, f'{header}{sep}{read_version()}\n{new_entry}\n{sep}{changelog}')
diff --git a/pyproject.toml b/pyproject.toml
index 64504ff..9faf53b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -10,7 +10,7 @@ maintainers = [
{name = "bashonly", email = "bashonly@protonmail.com"},
{name = "coletdjnz", email = "coletdjnz@protonmail.com"},
]
-description = "A youtube-dl fork with additional features and patches"
+description = "A feature-rich command-line audio/video downloader"
readme = "README.md"
requires-python = ">=3.8"
keywords = [
@@ -53,6 +53,7 @@ dependencies = [
[project.optional-dependencies]
default = []
+curl_cffi = ["curl-cffi==0.5.10; implementation_name=='cpython'"]
secretstorage = [
"cffi",
"secretstorage",
@@ -68,7 +69,10 @@ dev = [
"isort",
"pytest",
]
-pyinstaller = ["pyinstaller>=6.3"]
+pyinstaller = [
+ "pyinstaller>=6.3; sys_platform!='darwin'",
+ "pyinstaller==5.13.2; sys_platform=='darwin'", # needed for curl_cffi
+]
py2exe = ["py2exe>=0.12"]
[project.urls]
diff --git a/supportedsites.md b/supportedsites.md
index a4b2d57..ba77c0f 100644
--- a/supportedsites.md
+++ b/supportedsites.md
@@ -47,7 +47,7 @@
- **aenetworks:show**
- **AeonCo**
- **afreecatv**: [*afreecatv*](## "netrc machine") afreecatv.com
- - **afreecatv:live**: [*afreecatv*](## "netrc machine") afreecatv.com
+ - **afreecatv:live**: [*afreecatv*](## "netrc machine") afreecatv.com livestreams
- **afreecatv:user**
- **AirTV**
- **AitubeKZVideo**
@@ -105,6 +105,7 @@
- **ArteTVPlaylist**
- **asobichannel**: ASOBI CHANNEL
- **asobichannel:tag**: ASOBI CHANNEL
+ - **AsobiStage**: ASOBISTAGE (アソビステージ)
- **AtresPlayer**: [*atresplayer*](## "netrc machine")
- **AtScaleConfEvent**
- **ATVAt**
@@ -436,6 +437,7 @@
- **FacebookPluginsVideo**
- **fancode:live**: [*fancode*](## "netrc machine") (**Currently broken**)
- **fancode:vod**: [*fancode*](## "netrc machine") (**Currently broken**)
+ - **Fathom**
- **faz.net**
- **fc2**: [*fc2*](## "netrc machine")
- **fc2:embed**
@@ -633,8 +635,9 @@
- **Jamendo**
- **JamendoAlbum**
- **JeuxVideo**: (**Currently broken**)
- - **JioSaavnAlbum**
- - **JioSaavnSong**
+ - **jiosaavn:album**
+ - **jiosaavn:playlist**
+ - **jiosaavn:song**
- **Joj**
- **JoqrAg**: 超!A&G+ 文化放送 (f.k.a. AGQR) Nippon Cultural Broadcasting, Inc. (JOQR)
- **Jove**
@@ -716,6 +719,8 @@
- **Lnk**
- **LnkGo**
- **loc**: Library of Congress
+ - **loom**
+ - **loom:folder**
- **LoveHomePorn**
- **LRTStream**
- **LRTVOD**
@@ -1136,6 +1141,7 @@
- **Radiko**
- **RadikoRadio**
- **radio.de**: (**Currently broken**)
+ - **Radio1Be**
- **radiocanada**
- **radiocanada:audiovideo**
- **RadioComercial**
@@ -1288,6 +1294,7 @@
- **SeznamZpravyArticle**
- **Shahid**: [*shahid*](## "netrc machine")
- **ShahidShow**
+ - **SharePoint**
- **ShareVideosEmbed**
- **ShemarooMe**
- **ShowRoomLive**
diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py
index 6be47af..5242cf8 100644
--- a/test/test_YoutubeDL.py
+++ b/test/test_YoutubeDL.py
@@ -183,7 +183,7 @@ class TestFormatSelection(unittest.TestCase):
]
info_dict = _make_result(formats)
- ydl = YDL({'format': 'best'})
+ ydl = YDL({'format': 'best', 'format_sort': ['abr', 'ext']})
ydl.sort_formats(info_dict)
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
@@ -195,7 +195,7 @@ class TestFormatSelection(unittest.TestCase):
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'mp3-64')
- ydl = YDL({'prefer_free_formats': True})
+ ydl = YDL({'prefer_free_formats': True, 'format_sort': ['abr', 'ext']})
ydl.sort_formats(info_dict)
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
diff --git a/test/test_cookies.py b/test/test_cookies.py
index 5282ef6..bd61f30 100644
--- a/test/test_cookies.py
+++ b/test/test_cookies.py
@@ -1,5 +1,5 @@
+import datetime as dt
import unittest
-from datetime import datetime, timezone
from yt_dlp import cookies
from yt_dlp.cookies import (
@@ -138,7 +138,7 @@ class TestCookies(unittest.TestCase):
self.assertEqual(cookie.name, 'foo')
self.assertEqual(cookie.value, 'test%20%3Bcookie')
self.assertFalse(cookie.secure)
- expected_expiration = datetime(2021, 6, 18, 21, 39, 19, tzinfo=timezone.utc)
+ expected_expiration = dt.datetime(2021, 6, 18, 21, 39, 19, tzinfo=dt.timezone.utc)
self.assertEqual(cookie.expires, int(expected_expiration.timestamp()))
def test_pbkdf2_sha1(self):
diff --git a/test/test_networking.py b/test/test_networking.py
index 628f1f1..b50f70d 100644
--- a/test/test_networking.py
+++ b/test/test_networking.py
@@ -27,9 +27,10 @@ import zlib
from email.message import Message
from http.cookiejar import CookieJar
+from test.conftest import validate_and_send
from test.helper import FakeYDL, http_server_port, verify_address_availability
from yt_dlp.cookies import YoutubeDLCookieJar
-from yt_dlp.dependencies import brotli, requests, urllib3
+from yt_dlp.dependencies import brotli, curl_cffi, requests, urllib3
from yt_dlp.networking import (
HEADRequest,
PUTRequest,
@@ -50,10 +51,13 @@ from yt_dlp.networking.exceptions import (
TransportError,
UnsupportedRequest,
)
+from yt_dlp.networking.impersonate import (
+ ImpersonateRequestHandler,
+ ImpersonateTarget,
+)
+from yt_dlp.utils import YoutubeDLError
from yt_dlp.utils._utils import _YDLLogger as FakeLogger
-from yt_dlp.utils.networking import HTTPHeaderDict
-
-from test.conftest import validate_and_send
+from yt_dlp.utils.networking import HTTPHeaderDict, std_headers
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -75,6 +79,7 @@ def _build_proxy_handler(name):
class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
+ default_request_version = 'HTTP/1.1'
def log_message(self, format, *args):
pass
@@ -112,6 +117,8 @@ class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
def _read_data(self):
if 'Content-Length' in self.headers:
return self.rfile.read(int(self.headers['Content-Length']))
+ else:
+ return b''
def do_POST(self):
data = self._read_data() + str(self.headers).encode()
@@ -195,7 +202,8 @@ class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
self._headers()
elif self.path.startswith('/308-to-headers'):
self.send_response(308)
- self.send_header('Location', '/headers')
+ # redirect to "localhost" for testing cookie redirection handling
+ self.send_header('Location', f'http://localhost:{self.connection.getsockname()[1]}/headers')
self.send_header('Content-Length', '0')
self.end_headers()
elif self.path == '/trailing_garbage':
@@ -310,7 +318,7 @@ class TestRequestHandlerBase:
class TestHTTPRequestHandler(TestRequestHandlerBase):
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_verify_cert(self, handler):
with handler() as rh:
with pytest.raises(CertificateVerifyError):
@@ -321,7 +329,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
assert r.status == 200
r.close()
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_ssl_error(self, handler):
# HTTPS server with too old TLS version
# XXX: is there a better way to test this than to create a new server?
@@ -335,11 +343,11 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
https_server_thread.start()
with handler(verify=False) as rh:
- with pytest.raises(SSLError, match=r'ssl(?:v3|/tls) alert handshake failure') as exc_info:
+ with pytest.raises(SSLError, match=r'(?i)ssl(?:v3|/tls).alert.handshake.failure') as exc_info:
validate_and_send(rh, Request(f'https://127.0.0.1:{https_port}/headers'))
assert not issubclass(exc_info.type, CertificateVerifyError)
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_percent_encode(self, handler):
with handler() as rh:
# Unicode characters should be encoded with uppercase percent-encoding
@@ -351,7 +359,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
assert res.status == 200
res.close()
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
@pytest.mark.parametrize('path', [
'/a/b/./../../headers',
'/redirect_dotsegments',
@@ -367,6 +375,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
assert res.url == f'http://127.0.0.1:{self.http_port}/headers'
res.close()
+ # Not supported by CurlCFFI (non-standard)
@pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
def test_unicode_path_redirection(self, handler):
with handler() as rh:
@@ -374,7 +383,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
assert r.url == f'http://127.0.0.1:{self.http_port}/%E4%B8%AD%E6%96%87.html'
r.close()
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_raise_http_error(self, handler):
with handler() as rh:
for bad_status in (400, 500, 599, 302):
@@ -384,7 +393,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
# Should not raise an error
validate_and_send(rh, Request('http://127.0.0.1:%d/gen_200' % self.http_port)).close()
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_response_url(self, handler):
with handler() as rh:
# Response url should be that of the last url in redirect chain
@@ -395,62 +404,50 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
assert res2.url == f'http://127.0.0.1:{self.http_port}/gen_200'
res2.close()
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
- def test_redirect(self, handler):
+ # Covers some basic cases we expect some level of consistency between request handlers for
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
+ @pytest.mark.parametrize('redirect_status,method,expected', [
+ # A 303 must either use GET or HEAD for subsequent request
+ (303, 'POST', ('', 'GET', False)),
+ (303, 'HEAD', ('', 'HEAD', False)),
+
+ # 301 and 302 turn POST only into a GET
+ (301, 'POST', ('', 'GET', False)),
+ (301, 'HEAD', ('', 'HEAD', False)),
+ (302, 'POST', ('', 'GET', False)),
+ (302, 'HEAD', ('', 'HEAD', False)),
+
+ # 307 and 308 should not change method
+ (307, 'POST', ('testdata', 'POST', True)),
+ (308, 'POST', ('testdata', 'POST', True)),
+ (307, 'HEAD', ('', 'HEAD', False)),
+ (308, 'HEAD', ('', 'HEAD', False)),
+ ])
+ def test_redirect(self, handler, redirect_status, method, expected):
with handler() as rh:
- def do_req(redirect_status, method, assert_no_content=False):
- data = b'testdata' if method in ('POST', 'PUT') else None
- res = validate_and_send(
- rh, Request(f'http://127.0.0.1:{self.http_port}/redirect_{redirect_status}', method=method, data=data))
-
- headers = b''
- data_sent = b''
- if data is not None:
- data_sent += res.read(len(data))
- if data_sent != data:
- headers += data_sent
- data_sent = b''
-
- headers += res.read()
-
- if assert_no_content or data is None:
- assert b'Content-Type' not in headers
- assert b'Content-Length' not in headers
- else:
- assert b'Content-Type' in headers
- assert b'Content-Length' in headers
-
- return data_sent.decode(), res.headers.get('method', '')
-
- # A 303 must either use GET or HEAD for subsequent request
- assert do_req(303, 'POST', True) == ('', 'GET')
- assert do_req(303, 'HEAD') == ('', 'HEAD')
-
- assert do_req(303, 'PUT', True) == ('', 'GET')
-
- # 301 and 302 turn POST only into a GET
- assert do_req(301, 'POST', True) == ('', 'GET')
- assert do_req(301, 'HEAD') == ('', 'HEAD')
- assert do_req(302, 'POST', True) == ('', 'GET')
- assert do_req(302, 'HEAD') == ('', 'HEAD')
-
- assert do_req(301, 'PUT') == ('testdata', 'PUT')
- assert do_req(302, 'PUT') == ('testdata', 'PUT')
+ data = b'testdata' if method == 'POST' else None
+ headers = {}
+ if data is not None:
+ headers['Content-Type'] = 'application/test'
+ res = validate_and_send(
+ rh, Request(f'http://127.0.0.1:{self.http_port}/redirect_{redirect_status}', method=method, data=data,
+ headers=headers))
- # 307 and 308 should not change method
- for m in ('POST', 'PUT'):
- assert do_req(307, m) == ('testdata', m)
- assert do_req(308, m) == ('testdata', m)
+ headers = b''
+ data_recv = b''
+ if data is not None:
+ data_recv += res.read(len(data))
+ if data_recv != data:
+ headers += data_recv
+ data_recv = b''
- assert do_req(307, 'HEAD') == ('', 'HEAD')
- assert do_req(308, 'HEAD') == ('', 'HEAD')
+ headers += res.read()
- # These should not redirect and instead raise an HTTPError
- for code in (300, 304, 305, 306):
- with pytest.raises(HTTPError):
- do_req(code, 'GET')
+ assert expected[0] == data_recv.decode()
+ assert expected[1] == res.headers.get('method')
+ assert expected[2] == ('content-length' in headers.decode().lower())
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_request_cookie_header(self, handler):
# We should accept a Cookie header being passed as in normal headers and handle it appropriately.
with handler() as rh:
@@ -459,16 +456,17 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
rh, Request(
f'http://127.0.0.1:{self.http_port}/headers',
headers={'Cookie': 'test=test'})).read().decode()
- assert 'Cookie: test=test' in res
+ assert 'cookie: test=test' in res.lower()
# Specified Cookie header should be removed on any redirect
res = validate_and_send(
rh, Request(
f'http://127.0.0.1:{self.http_port}/308-to-headers',
- headers={'Cookie': 'test=test'})).read().decode()
- assert 'Cookie: test=test' not in res
+ headers={'Cookie': 'test=test2'})).read().decode()
+ assert 'cookie: test=test2' not in res.lower()
# Specified Cookie header should override global cookiejar for that request
+ # Whether cookies from the cookiejar are applied on the redirect is considered undefined for now
cookiejar = YoutubeDLCookieJar()
cookiejar.set_cookie(http.cookiejar.Cookie(
version=0, name='test', value='ytdlp', port=None, port_specified=False,
@@ -478,23 +476,23 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
with handler(cookiejar=cookiejar) as rh:
data = validate_and_send(
- rh, Request(f'http://127.0.0.1:{self.http_port}/headers', headers={'cookie': 'test=test'})).read()
- assert b'Cookie: test=ytdlp' not in data
- assert b'Cookie: test=test' in data
+ rh, Request(f'http://127.0.0.1:{self.http_port}/headers', headers={'cookie': 'test=test3'})).read()
+ assert b'cookie: test=ytdlp' not in data.lower()
+ assert b'cookie: test=test3' in data.lower()
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_redirect_loop(self, handler):
with handler() as rh:
with pytest.raises(HTTPError, match='redirect loop'):
validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/redirect_loop'))
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_incompleteread(self, handler):
with handler(timeout=2) as rh:
- with pytest.raises(IncompleteRead):
+ with pytest.raises(IncompleteRead, match='13 bytes read, 234221 more expected'):
validate_and_send(rh, Request('http://127.0.0.1:%d/incompleteread' % self.http_port)).read()
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_cookies(self, handler):
cookiejar = YoutubeDLCookieJar()
cookiejar.set_cookie(http.cookiejar.Cookie(
@@ -503,47 +501,66 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
with handler(cookiejar=cookiejar) as rh:
data = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/headers')).read()
- assert b'Cookie: test=ytdlp' in data
+ assert b'cookie: test=ytdlp' in data.lower()
# Per request
with handler() as rh:
data = validate_and_send(
rh, Request(f'http://127.0.0.1:{self.http_port}/headers', extensions={'cookiejar': cookiejar})).read()
- assert b'Cookie: test=ytdlp' in data
+ assert b'cookie: test=ytdlp' in data.lower()
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_headers(self, handler):
with handler(headers=HTTPHeaderDict({'test1': 'test', 'test2': 'test2'})) as rh:
# Global Headers
- data = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/headers')).read()
- assert b'Test1: test' in data
+ data = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/headers')).read().lower()
+ assert b'test1: test' in data
# Per request headers, merged with global
data = validate_and_send(rh, Request(
- f'http://127.0.0.1:{self.http_port}/headers', headers={'test2': 'changed', 'test3': 'test3'})).read()
- assert b'Test1: test' in data
- assert b'Test2: changed' in data
- assert b'Test2: test2' not in data
- assert b'Test3: test3' in data
-
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
- def test_timeout(self, handler):
+ f'http://127.0.0.1:{self.http_port}/headers', headers={'test2': 'changed', 'test3': 'test3'})).read().lower()
+ assert b'test1: test' in data
+ assert b'test2: changed' in data
+ assert b'test2: test2' not in data
+ assert b'test3: test3' in data
+
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
+ def test_read_timeout(self, handler):
with handler() as rh:
# Default timeout is 20 seconds, so this should go through
validate_and_send(
- rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_3'))
+ rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_1'))
- with handler(timeout=0.5) as rh:
+ with handler(timeout=0.1) as rh:
with pytest.raises(TransportError):
validate_and_send(
- rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_1'))
+ rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_5'))
# Per request timeout, should override handler timeout
validate_and_send(
rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_1', extensions={'timeout': 4}))
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
+ def test_connect_timeout(self, handler):
+ # nothing should be listening on this port
+ connect_timeout_url = 'http://10.255.255.255'
+ with handler(timeout=0.01) as rh:
+ now = time.time()
+ with pytest.raises(TransportError):
+ validate_and_send(
+ rh, Request(connect_timeout_url))
+ assert 0.01 <= time.time() - now < 20
+
+ with handler() as rh:
+ with pytest.raises(TransportError):
+ # Per request timeout, should override handler timeout
+ now = time.time()
+ validate_and_send(
+ rh, Request(connect_timeout_url, extensions={'timeout': 0.01}))
+ assert 0.01 <= time.time() - now < 20
+
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_source_address(self, handler):
source_address = f'127.0.0.{random.randint(5, 255)}'
# on some systems these loopback addresses we need for testing may not be available
@@ -554,6 +571,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
rh, Request(f'http://127.0.0.1:{self.http_port}/source_address')).read().decode()
assert source_address == data
+ # Not supported by CurlCFFI
@pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
def test_gzip_trailing_garbage(self, handler):
with handler() as rh:
@@ -571,7 +589,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
assert res.headers.get('Content-Encoding') == 'br'
assert res.read() == b'<html><video src="/vid.mp4" /></html>'
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_deflate(self, handler):
with handler() as rh:
res = validate_and_send(
@@ -581,7 +599,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
assert res.headers.get('Content-Encoding') == 'deflate'
assert res.read() == b'<html><video src="/vid.mp4" /></html>'
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_gzip(self, handler):
with handler() as rh:
res = validate_and_send(
@@ -591,7 +609,7 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
assert res.headers.get('Content-Encoding') == 'gzip'
assert res.read() == b'<html><video src="/vid.mp4" /></html>'
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_multiple_encodings(self, handler):
with handler() as rh:
for pair in ('gzip,deflate', 'deflate, gzip', 'gzip, gzip', 'deflate, deflate'):
@@ -602,17 +620,18 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
assert res.headers.get('Content-Encoding') == pair
assert res.read() == b'<html><video src="/vid.mp4" /></html>'
+ # Not supported by curl_cffi
@pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
def test_unsupported_encoding(self, handler):
with handler() as rh:
res = validate_and_send(
rh, Request(
f'http://127.0.0.1:{self.http_port}/content-encoding',
- headers={'ytdl-encoding': 'unsupported'}))
+ headers={'ytdl-encoding': 'unsupported', 'Accept-Encoding': '*'}))
assert res.headers.get('Content-Encoding') == 'unsupported'
assert res.read() == b'raw'
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_read(self, handler):
with handler() as rh:
res = validate_and_send(
@@ -620,9 +639,12 @@ class TestHTTPRequestHandler(TestRequestHandlerBase):
assert res.readable()
assert res.read(1) == b'H'
assert res.read(3) == b'ost'
+ assert res.read().decode().endswith('\n\n')
+ assert res.read() == b''
class TestHTTPProxy(TestRequestHandlerBase):
+ # Note: this only tests http urls over non-CONNECT proxy
@classmethod
def setup_class(cls):
super().setup_class()
@@ -642,7 +664,7 @@ class TestHTTPProxy(TestRequestHandlerBase):
cls.geo_proxy_thread.daemon = True
cls.geo_proxy_thread.start()
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_http_proxy(self, handler):
http_proxy = f'http://127.0.0.1:{self.proxy_port}'
geo_proxy = f'http://127.0.0.1:{self.geo_port}'
@@ -668,7 +690,7 @@ class TestHTTPProxy(TestRequestHandlerBase):
assert res != f'normal: {real_url}'
assert 'Accept' in res
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_noproxy(self, handler):
with handler(proxies={'proxy': f'http://127.0.0.1:{self.proxy_port}'}) as rh:
# NO_PROXY
@@ -678,7 +700,7 @@ class TestHTTPProxy(TestRequestHandlerBase):
'utf-8')
assert 'Accept' in nop_response
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_allproxy(self, handler):
url = 'http://foo.com/bar'
with handler() as rh:
@@ -686,7 +708,7 @@ class TestHTTPProxy(TestRequestHandlerBase):
'utf-8')
assert response == f'normal: {url}'
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_http_proxy_with_idn(self, handler):
with handler(proxies={
'http': f'http://127.0.0.1:{self.proxy_port}',
@@ -698,7 +720,6 @@ class TestHTTPProxy(TestRequestHandlerBase):
class TestClientCertificate:
-
@classmethod
def setup_class(cls):
certfn = os.path.join(TEST_DIR, 'testcert.pem')
@@ -724,27 +745,27 @@ class TestClientCertificate:
) as rh:
validate_and_send(rh, Request(f'https://127.0.0.1:{self.port}/video.html')).read().decode()
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_certificate_combined_nopass(self, handler):
self._run_test(handler, client_cert={
'client_certificate': os.path.join(self.certdir, 'clientwithkey.crt'),
})
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_certificate_nocombined_nopass(self, handler):
self._run_test(handler, client_cert={
'client_certificate': os.path.join(self.certdir, 'client.crt'),
'client_certificate_key': os.path.join(self.certdir, 'client.key'),
})
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_certificate_combined_pass(self, handler):
self._run_test(handler, client_cert={
'client_certificate': os.path.join(self.certdir, 'clientwithencryptedkey.crt'),
'client_certificate_password': 'foobar',
})
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_certificate_nocombined_pass(self, handler):
self._run_test(handler, client_cert={
'client_certificate': os.path.join(self.certdir, 'client.crt'),
@@ -753,6 +774,18 @@ class TestClientCertificate:
})
+@pytest.mark.parametrize('handler', ['CurlCFFI'], indirect=True)
+class TestHTTPImpersonateRequestHandler(TestRequestHandlerBase):
+ def test_supported_impersonate_targets(self, handler):
+ with handler(headers=std_headers) as rh:
+ # note: this assumes the impersonate request handler supports the impersonate extension
+ for target in rh.supported_targets:
+ res = validate_and_send(rh, Request(
+ f'http://127.0.0.1:{self.http_port}/headers', extensions={'impersonate': target}))
+ assert res.status == 200
+ assert std_headers['user-agent'].lower() not in res.read().decode().lower()
+
+
class TestRequestHandlerMisc:
"""Misc generic tests for request handlers, not related to request or validation testing"""
@pytest.mark.parametrize('handler,logger_name', [
@@ -931,6 +964,172 @@ class TestRequestsRequestHandler(TestRequestHandlerBase):
assert called
+@pytest.mark.parametrize('handler', ['CurlCFFI'], indirect=True)
+class TestCurlCFFIRequestHandler(TestRequestHandlerBase):
+
+ @pytest.mark.parametrize('params,extensions', [
+ ({}, {'impersonate': ImpersonateTarget('chrome')}),
+ ({'impersonate': ImpersonateTarget('chrome', '110')}, {}),
+ ({'impersonate': ImpersonateTarget('chrome', '99')}, {'impersonate': ImpersonateTarget('chrome', '110')}),
+ ])
+ def test_impersonate(self, handler, params, extensions):
+ with handler(headers=std_headers, **params) as rh:
+ res = validate_and_send(
+ rh, Request(f'http://127.0.0.1:{self.http_port}/headers', extensions=extensions)).read().decode()
+ assert 'sec-ch-ua: "Chromium";v="110"' in res
+ # Check that the impersonated user agent is sent instead of ours
+ assert 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36' in res
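+ # As the third parametrized case shows, the `impersonate` extension on a
+ # request takes precedence over the handler-level `impersonate` parameter.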
+
+ def test_headers(self, handler):
+ with handler(headers=std_headers) as rh:
+ # Ensure curl-impersonate overrides our standard headers (usually added to every request)
+ res = validate_and_send(
+ rh, Request(f'http://127.0.0.1:{self.http_port}/headers', extensions={
+ 'impersonate': ImpersonateTarget('safari')}, headers={'x-custom': 'test', 'sec-fetch-mode': 'custom'})).read().decode().lower()
+
+ assert std_headers['user-agent'].lower() not in res
+ assert std_headers['accept-language'].lower() not in res
+ assert std_headers['sec-fetch-mode'].lower() not in res
+ # other than UA, custom headers that differ from std_headers should be kept
+ assert 'sec-fetch-mode: custom' in res
+ assert 'x-custom: test' in res
+ # but when not impersonating, don't remove std_headers
+ res = validate_and_send(
+ rh, Request(f'http://127.0.0.1:{self.http_port}/headers', headers={'x-custom': 'test'})).read().decode().lower()
+ # std_headers should be present
+ for k, v in std_headers.items():
+ assert f'{k}: {v}'.lower() in res
+
+ @pytest.mark.parametrize('raised,expected,match', [
+ (lambda: curl_cffi.requests.errors.RequestsError(
+ '', code=curl_cffi.const.CurlECode.PARTIAL_FILE), IncompleteRead, None),
+ (lambda: curl_cffi.requests.errors.RequestsError(
+ '', code=curl_cffi.const.CurlECode.OPERATION_TIMEDOUT), TransportError, None),
+ (lambda: curl_cffi.requests.errors.RequestsError(
+ '', code=curl_cffi.const.CurlECode.RECV_ERROR), TransportError, None),
+ ])
+ def test_response_error_mapping(self, handler, monkeypatch, raised, expected, match):
+ import curl_cffi.requests
+
+ from yt_dlp.networking._curlcffi import CurlCFFIResponseAdapter
+ curl_res = curl_cffi.requests.Response()
+ res = CurlCFFIResponseAdapter(curl_res)
+
+ def mock_read(*args, **kwargs):
+ try:
+ raise raised()
+ except Exception as e:
+ e.response = curl_res
+ raise
+ monkeypatch.setattr(res.fp, 'read', mock_read)
+
+ with pytest.raises(expected, match=match) as exc_info:
+ res.read()
+
+ assert exc_info.type is expected
+
+ @pytest.mark.parametrize('raised,expected,match', [
+ (lambda: curl_cffi.requests.errors.RequestsError(
+ '', code=curl_cffi.const.CurlECode.OPERATION_TIMEDOUT), TransportError, None),
+ (lambda: curl_cffi.requests.errors.RequestsError(
+ '', code=curl_cffi.const.CurlECode.PEER_FAILED_VERIFICATION), CertificateVerifyError, None),
+ (lambda: curl_cffi.requests.errors.RequestsError(
+ '', code=curl_cffi.const.CurlECode.SSL_CONNECT_ERROR), SSLError, None),
+ (lambda: curl_cffi.requests.errors.RequestsError(
+ '', code=curl_cffi.const.CurlECode.TOO_MANY_REDIRECTS), HTTPError, None),
+ (lambda: curl_cffi.requests.errors.RequestsError(
+ '', code=curl_cffi.const.CurlECode.PROXY), ProxyError, None),
+ ])
+ def test_request_error_mapping(self, handler, monkeypatch, raised, expected, match):
+ import curl_cffi.requests
+ curl_res = curl_cffi.requests.Response()
+ curl_res.status_code = 301
+
+ with handler() as rh:
+ original_get_instance = rh._get_instance
+
+ def mock_get_instance(*args, **kwargs):
+ instance = original_get_instance(*args, **kwargs)
+
+ def request(*_, **__):
+ try:
+ raise raised()
+ except Exception as e:
+ e.response = curl_res
+ raise
+ monkeypatch.setattr(instance, 'request', request)
+ return instance
+
+ monkeypatch.setattr(rh, '_get_instance', mock_get_instance)
+
+ with pytest.raises(expected) as exc_info:
+ rh.send(Request('http://fake'))
+
+ assert exc_info.type is expected
+
+ def test_response_reader(self, handler):
+ class FakeResponse:
+ def __init__(self, raise_error=False):
+ self.raise_error = raise_error
+ self.closed = False
+
+ def iter_content(self):
+ yield b'foo'
+ yield b'bar'
+ yield b'z'
+ if self.raise_error:
+ raise Exception('test')
+
+ def close(self):
+ self.closed = True
+
+ from yt_dlp.networking._curlcffi import CurlCFFIResponseReader
+
+ res = CurlCFFIResponseReader(FakeResponse())
+ assert res.readable
+ assert res.bytes_read == 0
+ assert res.read(1) == b'f'
+ assert res.bytes_read == 3
+ assert res._buffer == b'oo'
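+ # `bytes_read` counts bytes pulled from the underlying iterator (whole
+ # chunks), not bytes returned to the caller, so read(1) leaves it at 3
+ # with b'oo' still buffered.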
+
+ assert res.read(2) == b'oo'
+ assert res.bytes_read == 3
+ assert res._buffer == b''
+
+ assert res.read(2) == b'ba'
+ assert res.bytes_read == 6
+ assert res._buffer == b'r'
+
+ assert res.read(3) == b'rz'
+ assert res.bytes_read == 7
+ assert res._buffer == b''
+ assert res.closed
+ assert res._response.closed
+
+ # should handle no size param
+ res2 = CurlCFFIResponseReader(FakeResponse())
+ assert res2.read() == b'foobarz'
+ assert res2.bytes_read == 7
+ assert res2._buffer == b''
+ assert res2.closed
+
+ # should close on an exception
+ res3 = CurlCFFIResponseReader(FakeResponse(raise_error=True))
+ with pytest.raises(Exception, match='test'):
+ res3.read()
+ assert res3._buffer == b''
+ assert res3.bytes_read == 7
+ assert res3.closed
+
+ # buffer should be cleared on close
+ res4 = CurlCFFIResponseReader(FakeResponse())
+ res4.read(2)
+ assert res4._buffer == b'o'
+ res4.close()
+ assert res4.closed
+ assert res4._buffer == b''
+
+
def run_validation(handler, error, req, **handler_kwargs):
with handler(**handler_kwargs) as rh:
if error:
@@ -975,6 +1174,10 @@ class TestRequestHandlerValidation:
('ws', False, {}),
('wss', False, {}),
]),
+ ('CurlCFFI', [
+ ('http', False, {}),
+ ('https', False, {}),
+ ]),
(NoCheckRH, [('http', False, {})]),
(ValidationRH, [('http', UnsupportedRequest, {})])
]
@@ -998,6 +1201,14 @@ class TestRequestHandlerValidation:
('socks5', False),
('socks5h', False),
]),
+ ('CurlCFFI', 'http', [
+ ('http', False),
+ ('https', False),
+ ('socks4', False),
+ ('socks4a', False),
+ ('socks5', False),
+ ('socks5h', False),
+ ]),
(NoCheckRH, 'http', [('http', False)]),
(HTTPSupportedRH, 'http', [('http', UnsupportedRequest)]),
('Websockets', 'ws', [('http', UnsupportedRequest)]),
@@ -1015,6 +1226,10 @@ class TestRequestHandlerValidation:
('all', False),
('unrelated', False),
]),
+ ('CurlCFFI', [
+ ('all', False),
+ ('unrelated', False),
+ ]),
(NoCheckRH, [('all', False)]),
(HTTPSupportedRH, [('all', UnsupportedRequest)]),
(HTTPSupportedRH, [('no', UnsupportedRequest)]),
@@ -1036,6 +1251,19 @@ class TestRequestHandlerValidation:
({'timeout': 'notatimeout'}, AssertionError),
({'unsupported': 'value'}, UnsupportedRequest),
]),
+ ('CurlCFFI', 'http', [
+ ({'cookiejar': 'notacookiejar'}, AssertionError),
+ ({'cookiejar': YoutubeDLCookieJar()}, False),
+ ({'timeout': 1}, False),
+ ({'timeout': 'notatimeout'}, AssertionError),
+ ({'unsupported': 'value'}, UnsupportedRequest),
+ ({'impersonate': ImpersonateTarget('badtarget', None, None, None)}, UnsupportedRequest),
+ ({'impersonate': 123}, AssertionError),
+ ({'impersonate': ImpersonateTarget('chrome', None, None, None)}, False),
+ ({'impersonate': ImpersonateTarget(None, None, None, None)}, False),
+ ({'impersonate': ImpersonateTarget()}, False),
+ ({'impersonate': 'chrome'}, AssertionError)
+ ]),
(NoCheckRH, 'http', [
({'cookiejar': 'notacookiejar'}, False),
({'somerandom': 'test'}, False), # but any extension is allowed through
@@ -1055,7 +1283,7 @@ class TestRequestHandlerValidation:
def test_url_scheme(self, handler, scheme, fail, handler_kwargs):
run_validation(handler, fail, Request(f'{scheme}://'), **(handler_kwargs or {}))
- @pytest.mark.parametrize('handler,fail', [('Urllib', False), ('Requests', False)], indirect=['handler'])
+ @pytest.mark.parametrize('handler,fail', [('Urllib', False), ('Requests', False), ('CurlCFFI', False)], indirect=['handler'])
def test_no_proxy(self, handler, fail):
run_validation(handler, fail, Request('http://', proxies={'no': '127.0.0.1,github.com'}))
run_validation(handler, fail, Request('http://'), proxies={'no': '127.0.0.1,github.com'})
@@ -1078,13 +1306,13 @@ class TestRequestHandlerValidation:
run_validation(handler, fail, Request(f'{req_scheme}://', proxies={req_scheme: f'{scheme}://example.com'}))
run_validation(handler, fail, Request(f'{req_scheme}://'), proxies={req_scheme: f'{scheme}://example.com'})
- @pytest.mark.parametrize('handler', ['Urllib', HTTPSupportedRH, 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', HTTPSupportedRH, 'Requests', 'CurlCFFI'], indirect=True)
def test_empty_proxy(self, handler):
run_validation(handler, False, Request('http://', proxies={'http': None}))
run_validation(handler, False, Request('http://'), proxies={'http': None})
@pytest.mark.parametrize('proxy_url', ['//example.com', 'example.com', '127.0.0.1', '/a/b/c'])
- @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+ @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
def test_invalid_proxy_url(self, handler, proxy_url):
run_validation(handler, UnsupportedRequest, Request('http://', proxies={'http': proxy_url}))
@@ -1113,6 +1341,10 @@ class FakeResponse(Response):
class FakeRH(RequestHandler):
+ def __init__(self, *args, **params):
+ self.params = params
+ super().__init__(*args, **params)
+
def _validate(self, request):
return
@@ -1271,15 +1503,10 @@ class TestYoutubeDLNetworking:
('', {'all': '__noproxy__'}),
(None, {'http': 'http://127.0.0.1:8081', 'https': 'http://127.0.0.1:8081'}) # env, set https
])
- def test_proxy(self, proxy, expected):
- old_http_proxy = os.environ.get('HTTP_PROXY')
- try:
- os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8081' # ensure that provided proxies override env
- with FakeYDL({'proxy': proxy}) as ydl:
- assert ydl.proxies == expected
- finally:
- if old_http_proxy:
- os.environ['HTTP_PROXY'] = old_http_proxy
+ def test_proxy(self, proxy, expected, monkeypatch):
+ monkeypatch.setenv('HTTP_PROXY', 'http://127.0.0.1:8081')
+ with FakeYDL({'proxy': proxy}) as ydl:
+ assert ydl.proxies == expected
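+ # monkeypatch.setenv ensures the provided proxy still overrides the
+ # environment and restores HTTP_PROXY automatically after the test.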
def test_compat_request(self):
with FakeRHYDL() as ydl:
@@ -1331,6 +1558,95 @@ class TestYoutubeDLNetworking:
with pytest.raises(SSLError, match='testerror'):
ydl.urlopen('ssl://testerror')
+ def test_unsupported_impersonate_target(self):
+ class FakeImpersonationRHYDL(FakeYDL):
+ def __init__(self, *args, **kwargs):
+ class HTTPRH(RequestHandler):
+ def _send(self, request: Request):
+ pass
+ _SUPPORTED_URL_SCHEMES = ('http',)
+ _SUPPORTED_PROXY_SCHEMES = None
+
+ super().__init__(*args, **kwargs)
+ self._request_director = self.build_request_director([HTTPRH])
+
+ with FakeImpersonationRHYDL() as ydl:
+ with pytest.raises(
+ RequestError,
+ match=r'Impersonate target "test" is not available'
+ ):
+ ydl.urlopen(Request('http://', extensions={'impersonate': ImpersonateTarget('test', None, None, None)}))
+
+ def test_unsupported_impersonate_extension(self):
+ class FakeHTTPRHYDL(FakeYDL):
+ def __init__(self, *args, **kwargs):
+ class IRH(ImpersonateRequestHandler):
+ def _send(self, request: Request):
+ pass
+
+ _SUPPORTED_URL_SCHEMES = ('http',)
+ _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget('abc',): 'test'}
+ _SUPPORTED_PROXY_SCHEMES = None
+
+ super().__init__(*args, **kwargs)
+ self._request_director = self.build_request_director([IRH])
+
+ with FakeHTTPRHYDL() as ydl:
+ with pytest.raises(
+ RequestError,
+ match=r'Impersonate target "test" is not available'
+ ):
+ ydl.urlopen(Request('http://', extensions={'impersonate': ImpersonateTarget('test', None, None, None)}))
+
+ def test_raise_impersonate_error(self):
+ with pytest.raises(
+ YoutubeDLError,
+ match=r'Impersonate target "test" is not available'
+ ):
+ FakeYDL({'impersonate': ImpersonateTarget('test', None, None, None)})
+
+ def test_pass_impersonate_param(self, monkeypatch):
+
+ class IRH(ImpersonateRequestHandler):
+ def _send(self, request: Request):
+ pass
+
+ _SUPPORTED_URL_SCHEMES = ('http',)
+ _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget('abc'): 'test'}
+
+ # Bypass the check on initialization
+ brh = FakeYDL.build_request_director
+ monkeypatch.setattr(FakeYDL, 'build_request_director', lambda cls, handlers, preferences=None: brh(cls, handlers=[IRH]))
+
+ with FakeYDL({
+ 'impersonate': ImpersonateTarget('abc', None, None, None)
+ }) as ydl:
+ rh = self.build_handler(ydl, IRH)
+ assert rh.impersonate == ImpersonateTarget('abc', None, None, None)
+
+ def test_get_impersonate_targets(self):
+ handlers = []
+ for target_client in ('abc', 'xyz', 'asd'):
+ class TestRH(ImpersonateRequestHandler):
+ def _send(self, request: Request):
+ pass
+ _SUPPORTED_URL_SCHEMES = ('http',)
+ _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget(target_client,): 'test'}
+ RH_KEY = target_client
+ RH_NAME = target_client
+ handlers.append(TestRH)
+
+ with FakeYDL() as ydl:
+ ydl._request_director = ydl.build_request_director(handlers)
+ assert set(ydl._get_available_impersonate_targets()) == {
+ (ImpersonateTarget('xyz'), 'xyz'),
+ (ImpersonateTarget('abc'), 'abc'),
+ (ImpersonateTarget('asd'), 'asd')
+ }
+ assert ydl._impersonate_target_available(ImpersonateTarget('abc'))
+ assert ydl._impersonate_target_available(ImpersonateTarget())
+ assert not ydl._impersonate_target_available(ImpersonateTarget('zxy'))
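+ # An ImpersonateTarget with all fields unset acts as a wildcard and
+ # matches any available target (see TestImpersonateTarget below).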
+
@pytest.mark.parametrize('proxy_key,proxy_url,expected', [
('http', '__noproxy__', None),
('no', '127.0.0.1,foo.bar', '127.0.0.1,foo.bar'),
@@ -1341,23 +1657,17 @@ class TestYoutubeDLNetworking:
('http', 'socks4://example.com', 'socks4://example.com'),
('unrelated', '/bad/proxy', '/bad/proxy'), # clean_proxies should ignore bad proxies
])
- def test_clean_proxy(self, proxy_key, proxy_url, expected):
+ def test_clean_proxy(self, proxy_key, proxy_url, expected, monkeypatch):
# proxies should be cleaned in urlopen()
with FakeRHYDL() as ydl:
req = ydl.urlopen(Request('test://', proxies={proxy_key: proxy_url})).request
assert req.proxies[proxy_key] == expected
# and should also be cleaned when building the handler
- env_key = f'{proxy_key.upper()}_PROXY'
- old_env_proxy = os.environ.get(env_key)
- try:
- os.environ[env_key] = proxy_url # ensure that provided proxies override env
- with FakeYDL() as ydl:
- rh = self.build_handler(ydl)
- assert rh.proxies[proxy_key] == expected
- finally:
- if old_env_proxy:
- os.environ[env_key] = old_env_proxy
+ monkeypatch.setenv(f'{proxy_key.upper()}_PROXY', proxy_url)
+ with FakeYDL() as ydl:
+ rh = self.build_handler(ydl)
+ assert rh.proxies[proxy_key] == expected
def test_clean_proxy_header(self):
with FakeRHYDL() as ydl:
@@ -1629,3 +1939,71 @@ class TestResponse:
assert res.geturl() == res.url
assert res.info() is res.headers
assert res.getheader('test') == res.get_header('test')
+
+
+class TestImpersonateTarget:
+ @pytest.mark.parametrize('target_str,expected', [
+ ('abc', ImpersonateTarget('abc', None, None, None)),
+ ('abc-120_esr', ImpersonateTarget('abc', '120_esr', None, None)),
+ ('abc-120:xyz', ImpersonateTarget('abc', '120', 'xyz', None)),
+ ('abc-120:xyz-5.6', ImpersonateTarget('abc', '120', 'xyz', '5.6')),
+ ('abc:xyz', ImpersonateTarget('abc', None, 'xyz', None)),
+ ('abc:', ImpersonateTarget('abc', None, None, None)),
+ ('abc-120:', ImpersonateTarget('abc', '120', None, None)),
+ (':xyz', ImpersonateTarget(None, None, 'xyz', None)),
+ (':xyz-6.5', ImpersonateTarget(None, None, 'xyz', '6.5')),
+ (':', ImpersonateTarget(None, None, None, None)),
+ ('', ImpersonateTarget(None, None, None, None)),
+ ])
+ def test_target_from_str(self, target_str, expected):
+ assert ImpersonateTarget.from_str(target_str) == expected
+
+ @pytest.mark.parametrize('target_str', [
+ '-120', ':-12.0', '-12:-12', '-:-',
+ '::', 'a-c-d:', 'a-c-d:e-f-g', 'a:b:'
+ ])
+ def test_target_from_invalid_str(self, target_str):
+ with pytest.raises(ValueError):
+ ImpersonateTarget.from_str(target_str)
+
+ @pytest.mark.parametrize('target,expected', [
+ (ImpersonateTarget('abc', None, None, None), 'abc'),
+ (ImpersonateTarget('abc', '120', None, None), 'abc-120'),
+ (ImpersonateTarget('abc', '120', 'xyz', None), 'abc-120:xyz'),
+ (ImpersonateTarget('abc', '120', 'xyz', '5'), 'abc-120:xyz-5'),
+ (ImpersonateTarget('abc', None, 'xyz', None), 'abc:xyz'),
+ (ImpersonateTarget('abc', '120', None, None), 'abc-120'),
+ (ImpersonateTarget('abc', '120', 'xyz', None), 'abc-120:xyz'),
+ (ImpersonateTarget('abc', None, 'xyz'), 'abc:xyz'),
+ (ImpersonateTarget(None, None, 'xyz', '6.5'), ':xyz-6.5'),
+ (ImpersonateTarget('abc', ), 'abc'),
+ (ImpersonateTarget(None, None, None, None), ''),
+ ])
+ def test_str(self, target, expected):
+ assert str(target) == expected
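+ # Together with test_target_from_str above: the string form is
+ # `[client[-version]][:[os[-os_version]]]`, and the targets shown here
+ # round-trip through from_str()/str().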
+
+ @pytest.mark.parametrize('args', [
+ ('abc', None, None, '5'),
+ ('abc', '120', None, '5'),
+ (None, '120', None, None),
+ (None, '120', None, '5'),
+ (None, None, None, '5'),
+ (None, '120', 'xyz', '5'),
+ ])
+ def test_invalid_impersonate_target(self, args):
+ with pytest.raises(ValueError):
+ ImpersonateTarget(*args)
+
+ @pytest.mark.parametrize('target1,target2,is_in,is_eq', [
+ (ImpersonateTarget('abc', None, None, None), ImpersonateTarget('abc', None, None, None), True, True),
+ (ImpersonateTarget('abc', None, None, None), ImpersonateTarget('abc', '120', None, None), True, False),
+ (ImpersonateTarget('abc', None, 'xyz', 'test'), ImpersonateTarget('abc', '120', 'xyz', None), True, False),
+ (ImpersonateTarget('abc', '121', 'xyz', 'test'), ImpersonateTarget('abc', '120', 'xyz', 'test'), False, False),
+ (ImpersonateTarget('abc'), ImpersonateTarget('abc', '120', 'xyz', 'test'), True, False),
+ (ImpersonateTarget('abc', '120', 'xyz', 'test'), ImpersonateTarget('abc'), True, False),
+ (ImpersonateTarget(), ImpersonateTarget('abc', '120', 'xyz'), True, False),
+ (ImpersonateTarget(), ImpersonateTarget(), True, True),
+ ])
+ def test_impersonate_target_in(self, target1, target2, is_in, is_eq):
+ assert (target1 in target2) is is_in
+ assert (target1 == target2) is is_eq
diff --git a/test/test_socks.py b/test/test_socks.py
index cb22b61..43d612d 100644
--- a/test/test_socks.py
+++ b/test/test_socks.py
@@ -286,8 +286,14 @@ def ctx(request):
return CTX_MAP[request.param]()
+@pytest.mark.parametrize(
+ 'handler,ctx', [
+ ('Urllib', 'http'),
+ ('Requests', 'http'),
+ ('Websockets', 'ws'),
+ ('CurlCFFI', 'http')
+ ], indirect=True)
class TestSocks4Proxy:
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_socks4_no_auth(self, handler, ctx):
with handler() as rh:
with ctx.socks_server(Socks4ProxyHandler) as server_address:
@@ -295,7 +301,6 @@ class TestSocks4Proxy:
rh, proxies={'all': f'socks4://{server_address}'})
assert response['version'] == 4
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_socks4_auth(self, handler, ctx):
with handler() as rh:
with ctx.socks_server(Socks4ProxyHandler, user_id='user') as server_address:
@@ -305,7 +310,6 @@ class TestSocks4Proxy:
rh, proxies={'all': f'socks4://user:@{server_address}'})
assert response['version'] == 4
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_socks4a_ipv4_target(self, handler, ctx):
with ctx.socks_server(Socks4ProxyHandler) as server_address:
with handler(proxies={'all': f'socks4a://{server_address}'}) as rh:
@@ -313,7 +317,6 @@ class TestSocks4Proxy:
assert response['version'] == 4
assert (response['ipv4_address'] == '127.0.0.1') != (response['domain_address'] == '127.0.0.1')
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_socks4a_domain_target(self, handler, ctx):
with ctx.socks_server(Socks4ProxyHandler) as server_address:
with handler(proxies={'all': f'socks4a://{server_address}'}) as rh:
@@ -322,7 +325,6 @@ class TestSocks4Proxy:
assert response['ipv4_address'] is None
assert response['domain_address'] == 'localhost'
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_ipv4_client_source_address(self, handler, ctx):
with ctx.socks_server(Socks4ProxyHandler) as server_address:
source_address = f'127.0.0.{random.randint(5, 255)}'
@@ -333,7 +335,6 @@ class TestSocks4Proxy:
assert response['client_address'][0] == source_address
assert response['version'] == 4
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
@pytest.mark.parametrize('reply_code', [
Socks4CD.REQUEST_REJECTED_OR_FAILED,
Socks4CD.REQUEST_REJECTED_CANNOT_CONNECT_TO_IDENTD,
@@ -345,7 +346,6 @@ class TestSocks4Proxy:
with pytest.raises(ProxyError):
ctx.socks_info_request(rh)
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_ipv6_socks4_proxy(self, handler, ctx):
with ctx.socks_server(Socks4ProxyHandler, bind_ip='::1') as server_address:
with handler(proxies={'all': f'socks4://{server_address}'}) as rh:
@@ -354,7 +354,6 @@ class TestSocks4Proxy:
assert response['ipv4_address'] == '127.0.0.1'
assert response['version'] == 4
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_timeout(self, handler, ctx):
with ctx.socks_server(Socks4ProxyHandler, sleep=2) as server_address:
with handler(proxies={'all': f'socks4://{server_address}'}, timeout=0.5) as rh:
@@ -362,9 +361,15 @@ class TestSocks4Proxy:
ctx.socks_info_request(rh)
+@pytest.mark.parametrize(
+ 'handler,ctx', [
+ ('Urllib', 'http'),
+ ('Requests', 'http'),
+ ('Websockets', 'ws'),
+ ('CurlCFFI', 'http')
+ ], indirect=True)
class TestSocks5Proxy:
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_socks5_no_auth(self, handler, ctx):
with ctx.socks_server(Socks5ProxyHandler) as server_address:
with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -372,7 +377,6 @@ class TestSocks5Proxy:
assert response['auth_methods'] == [0x0]
assert response['version'] == 5
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_socks5_user_pass(self, handler, ctx):
with ctx.socks_server(Socks5ProxyHandler, auth=('test', 'testpass')) as server_address:
with handler() as rh:
@@ -385,7 +389,6 @@ class TestSocks5Proxy:
assert response['auth_methods'] == [Socks5Auth.AUTH_NONE, Socks5Auth.AUTH_USER_PASS]
assert response['version'] == 5
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_socks5_ipv4_target(self, handler, ctx):
with ctx.socks_server(Socks5ProxyHandler) as server_address:
with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -393,7 +396,6 @@ class TestSocks5Proxy:
assert response['ipv4_address'] == '127.0.0.1'
assert response['version'] == 5
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_socks5_domain_target(self, handler, ctx):
with ctx.socks_server(Socks5ProxyHandler) as server_address:
with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -401,7 +403,6 @@ class TestSocks5Proxy:
assert (response['ipv4_address'] == '127.0.0.1') != (response['ipv6_address'] == '::1')
assert response['version'] == 5
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_socks5h_domain_target(self, handler, ctx):
with ctx.socks_server(Socks5ProxyHandler) as server_address:
with handler(proxies={'all': f'socks5h://{server_address}'}) as rh:
@@ -410,7 +411,6 @@ class TestSocks5Proxy:
assert response['domain_address'] == 'localhost'
assert response['version'] == 5
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_socks5h_ip_target(self, handler, ctx):
with ctx.socks_server(Socks5ProxyHandler) as server_address:
with handler(proxies={'all': f'socks5h://{server_address}'}) as rh:
@@ -419,7 +419,6 @@ class TestSocks5Proxy:
assert response['domain_address'] is None
assert response['version'] == 5
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_socks5_ipv6_destination(self, handler, ctx):
with ctx.socks_server(Socks5ProxyHandler) as server_address:
with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -427,7 +426,6 @@ class TestSocks5Proxy:
assert response['ipv6_address'] == '::1'
assert response['version'] == 5
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_ipv6_socks5_proxy(self, handler, ctx):
with ctx.socks_server(Socks5ProxyHandler, bind_ip='::1') as server_address:
with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -438,7 +436,6 @@ class TestSocks5Proxy:
# XXX: is there any feasible way of testing IPv6 source addresses?
# Same would go for non-proxy source_address test...
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
def test_ipv4_client_source_address(self, handler, ctx):
with ctx.socks_server(Socks5ProxyHandler) as server_address:
source_address = f'127.0.0.{random.randint(5, 255)}'
@@ -448,7 +445,6 @@ class TestSocks5Proxy:
assert response['client_address'][0] == source_address
assert response['version'] == 5
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
@pytest.mark.parametrize('reply_code', [
Socks5Reply.GENERAL_FAILURE,
Socks5Reply.CONNECTION_NOT_ALLOWED,
@@ -465,7 +461,6 @@ class TestSocks5Proxy:
with pytest.raises(ProxyError):
ctx.socks_info_request(rh)
- @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Websockets', 'ws')], indirect=True)
def test_timeout(self, handler, ctx):
with ctx.socks_server(Socks5ProxyHandler, sleep=2) as server_address:
with handler(proxies={'all': f'socks5://{server_address}'}, timeout=1) as rh:
diff --git a/test/test_traversal.py b/test/test_traversal.py
new file mode 100644
index 0000000..9b2a27b
--- /dev/null
+++ b/test/test_traversal.py
@@ -0,0 +1,444 @@
+import http.cookies
+import re
+import xml.etree.ElementTree
+
+import pytest
+
+from yt_dlp.utils import dict_get, int_or_none, str_or_none
+from yt_dlp.utils.traversal import traverse_obj
+
+_TEST_DATA = {
+ 100: 100,
+ 1.2: 1.2,
+ 'str': 'str',
+ 'None': None,
+ '...': ...,
+ 'urls': [
+ {'index': 0, 'url': 'https://www.example.com/0'},
+ {'index': 1, 'url': 'https://www.example.com/1'},
+ ],
+ 'data': (
+ {'index': 2},
+ {'index': 3},
+ ),
+ 'dict': {},
+}
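+# For orientation, two of the paths exercised below:
+#   traverse_obj(_TEST_DATA, ('urls', 0, 'url'))   -> 'https://www.example.com/0'
+#   traverse_obj(_TEST_DATA, ('urls', ..., 'url')) -> both example URLs as a list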
+
+
+class TestTraversal:
+ def test_traversal_base(self):
+ assert traverse_obj(_TEST_DATA, ('str',)) == 'str', \
+ 'allow tuple path'
+ assert traverse_obj(_TEST_DATA, ['str']) == 'str', \
+ 'allow list path'
+ assert traverse_obj(_TEST_DATA, (value for value in ("str",))) == 'str', \
+ 'allow iterable path'
+ assert traverse_obj(_TEST_DATA, 'str') == 'str', \
+ 'single items should be treated as a path'
+ assert traverse_obj(_TEST_DATA, 100) == 100, \
+ 'allow int path'
+ assert traverse_obj(_TEST_DATA, 1.2) == 1.2, \
+ 'allow float path'
+ assert traverse_obj(_TEST_DATA, None) == _TEST_DATA, \
+ '`None` should not perform any modification'
+
+ def test_traversal_ellipsis(self):
+ assert traverse_obj(_TEST_DATA, ...) == [x for x in _TEST_DATA.values() if x not in (None, {})], \
+ '`...` should give all non-discarded values'
+ assert traverse_obj(_TEST_DATA, ('urls', 0, ...)) == list(_TEST_DATA['urls'][0].values()), \
+ '`...` selection for dicts should select all values'
+ assert traverse_obj(_TEST_DATA, (..., ..., 'url')) == ['https://www.example.com/0', 'https://www.example.com/1'], \
+ 'nested `...` queries should work'
+ assert traverse_obj(_TEST_DATA, (..., ..., 'index')) == list(range(4)), \
+ '`...` query result should be flattened'
+ assert traverse_obj(iter(range(4)), ...) == list(range(4)), \
+ '`...` should accept iterables'
+
+ def test_traversal_function(self):
+ filter_func = lambda x, y: x == 'urls' and isinstance(y, list)
+ assert traverse_obj(_TEST_DATA, filter_func) == [_TEST_DATA['urls']], \
+ 'function as query key should perform a filter based on (key, value)'
+ assert traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)) == ['str'], \
+ 'exceptions in the query function should be caught'
+ assert traverse_obj(iter(range(4)), lambda _, x: x % 2 == 0) == [0, 2], \
+ 'function key should accept iterables'
+ # Wrong function signature should raise (debug mode)
+ with pytest.raises(Exception):
+ traverse_obj(_TEST_DATA, lambda a: ...)
+ with pytest.raises(Exception):
+ traverse_obj(_TEST_DATA, lambda a, b, c: ...)
+
+ def test_traversal_set(self):
+ # transformation/type, like `expected_type`
+ assert traverse_obj(_TEST_DATA, (..., {str.upper}, )) == ['STR'], \
+ 'Function in set should be a transformation'
+ assert traverse_obj(_TEST_DATA, (..., {str})) == ['str'], \
+ 'Type in set should be a type filter'
+ assert traverse_obj(_TEST_DATA, (..., {str, int})) == [100, 'str'], \
+ 'Multiple types in set should be a type filter'
+ assert traverse_obj(_TEST_DATA, {dict}) == _TEST_DATA, \
+ 'A single set should be wrapped into a path'
+ assert traverse_obj(_TEST_DATA, (..., {str.upper})) == ['STR'], \
+ 'Transformation function should not raise'
+ expected = [x for x in map(str_or_none, _TEST_DATA.values()) if x is not None]
+ assert traverse_obj(_TEST_DATA, (..., {str_or_none})) == expected, \
+ 'Function in set should be a transformation'
+ assert traverse_obj(_TEST_DATA, ('fail', {lambda _: 'const'})) == 'const', \
+ 'Function in set should always be called'
+ # Empty sets, and sets with more than one element that are not all types, should raise
+ with pytest.raises(Exception):
+ traverse_obj(_TEST_DATA, set())
+ with pytest.raises(Exception):
+ traverse_obj(_TEST_DATA, {str.upper, str})
+
+ def test_traversal_slice(self):
+ _SLICE_DATA = [0, 1, 2, 3, 4]
+
+ assert traverse_obj(_TEST_DATA, ('dict', slice(1))) is None, \
+ 'slice on a dictionary should not throw'
+ assert traverse_obj(_SLICE_DATA, slice(1)) == _SLICE_DATA[:1], \
+ 'slice key should apply slice to sequence'
+ assert traverse_obj(_SLICE_DATA, slice(1, 2)) == _SLICE_DATA[1:2], \
+ 'slice key should apply slice to sequence'
+ assert traverse_obj(_SLICE_DATA, slice(1, 4, 2)) == _SLICE_DATA[1:4:2], \
+ 'slice key should apply slice to sequence'
+
+ def test_traversal_alternatives(self):
+ assert traverse_obj(_TEST_DATA, 'fail', 'str') == 'str', \
+ 'multiple `paths` should be treated as alternative paths'
+ assert traverse_obj(_TEST_DATA, 'str', 100) == 'str', \
+ 'alternatives should exit early'
+ assert traverse_obj(_TEST_DATA, 'fail', 'fail') is None, \
+ 'alternatives should return `default` if exhausted'
+ assert traverse_obj(_TEST_DATA, (..., 'fail'), 100) == 100, \
+ 'alternatives should track their own branching return'
+ assert traverse_obj(_TEST_DATA, ('dict', ...), ('data', ...)) == list(_TEST_DATA['data']), \
+ 'alternatives on empty objects should search further'
+
+ def test_traversal_branching_nesting(self):
+ assert traverse_obj(_TEST_DATA, ('urls', (3, 0), 'url')) == ['https://www.example.com/0'], \
+ 'tuple as key should be treated as branches'
+ assert traverse_obj(_TEST_DATA, ('urls', [3, 0], 'url')) == ['https://www.example.com/0'], \
+ 'list as key should be treated as branches'
+ assert traverse_obj(_TEST_DATA, ('urls', ((1, 'fail'), (0, 'url')))) == ['https://www.example.com/0'], \
+ 'double nesting in path should be treated as paths'
+ assert traverse_obj(['0', [1, 2]], [(0, 1), 0]) == [1], \
+ 'do not fail early on branching'
+ expected = ['https://www.example.com/0', 'https://www.example.com/1']
+ assert traverse_obj(_TEST_DATA, ('urls', ((0, ('fail', 'url')), (1, 'url')))) == expected, \
+ 'triple nesting in path should be treated as branches'
+ assert traverse_obj(_TEST_DATA, ('urls', ('fail', (..., 'url')))) == expected, \
+ 'ellipsis as branch path start gets flattened'
+
+ def test_traversal_dict(self):
+ assert traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}) == {0: 100, 1: 1.2}, \
+ 'dict key should result in a dict with the same keys'
+ expected = {0: 'https://www.example.com/0'}
+ assert traverse_obj(_TEST_DATA, {0: ('urls', 0, 'url')}) == expected, \
+ 'dict key should allow paths'
+ expected = {0: ['https://www.example.com/0']}
+ assert traverse_obj(_TEST_DATA, {0: ('urls', (3, 0), 'url')}) == expected, \
+ 'tuple in dict path should be treated as branches'
+ assert traverse_obj(_TEST_DATA, {0: ('urls', ((1, 'fail'), (0, 'url')))}) == expected, \
+ 'double nesting in dict path should be treated as paths'
+ expected = {0: ['https://www.example.com/1', 'https://www.example.com/0']}
+ assert traverse_obj(_TEST_DATA, {0: ('urls', ((1, ('fail', 'url')), (0, 'url')))}) == expected, \
+ 'triple nesting in dict path should be treated as branches'
+ assert traverse_obj(_TEST_DATA, {0: 'fail'}) == {}, \
+ 'remove `None` values when top level dict key fails'
+ assert traverse_obj(_TEST_DATA, {0: 'fail'}, default=...) == {0: ...}, \
+ 'use `default` if key fails and `default`'
+ assert traverse_obj(_TEST_DATA, {0: 'dict'}) == {}, \
+ 'remove empty values when dict key'
+ assert traverse_obj(_TEST_DATA, {0: 'dict'}, default=...) == {0: ...}, \
+ 'use `default` when dict key and `default`'
+ assert traverse_obj(_TEST_DATA, {0: {0: 'fail'}}) == {}, \
+ 'remove empty values when nested dict key fails'
+ assert traverse_obj(None, {0: 'fail'}) == {}, \
+ 'default to dict if pruned'
+ assert traverse_obj(None, {0: 'fail'}, default=...) == {0: ...}, \
+ 'default to dict if pruned and default is given'
+ assert traverse_obj(_TEST_DATA, {0: {0: 'fail'}}, default=...) == {0: {0: ...}}, \
+ 'use nested `default` when nested dict key fails and `default`'
+ assert traverse_obj(_TEST_DATA, {0: ('dict', ...)}) == {}, \
+ 'remove key if branch in dict key not successful'
+
+ def test_traversal_default(self):
+ _DEFAULT_DATA = {'None': None, 'int': 0, 'list': []}
+
+ assert traverse_obj(_DEFAULT_DATA, 'fail') is None, \
+ 'default value should be `None`'
+ assert traverse_obj(_DEFAULT_DATA, 'fail', 'fail', default=...) == ..., \
+ 'chained fails should result in default'
+ assert traverse_obj(_DEFAULT_DATA, 'None', 'int') == 0, \
+ 'should not short-circuit on `None`'
+ assert traverse_obj(_DEFAULT_DATA, 'fail', default=1) == 1, \
+ 'invalid dict key should result in `default`'
+ assert traverse_obj(_DEFAULT_DATA, 'None', default=1) == 1, \
+ '`None` is a deliberate sentinel and should become `default`'
+ assert traverse_obj(_DEFAULT_DATA, ('list', 10)) is None, \
+ '`IndexError` should result in `default`'
+ assert traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=1) == 1, \
+ 'if branched but not successful return `default` if defined, not `[]`'
+ assert traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=None) is None, \
+ 'if branched but not successful return `default` even if `default` is `None`'
+ assert traverse_obj(_DEFAULT_DATA, (..., 'fail')) == [], \
+ 'if branched but not successful return `[]`, not `default`'
+ assert traverse_obj(_DEFAULT_DATA, ('list', ...)) == [], \
+ 'if branched but object is empty return `[]`, not `default`'
+ assert traverse_obj(None, ...) == [], \
+ 'if branched but object is `None` return `[]`, not `default`'
+ assert traverse_obj({0: None}, (0, ...)) == [], \
+ 'if branched but state is `None` return `[]`, not `default`'
+
+ @pytest.mark.parametrize('path', [
+ ('fail', ...),
+ (..., 'fail'),
+ 100 * ('fail',) + (...,),
+ (...,) + 100 * ('fail',),
+ ])
+ def test_traversal_branching(self, path):
+ assert traverse_obj({}, path) == [], \
+ 'if branched but state is `None`, return `[]` (not `default`)'
+ assert traverse_obj({}, 'fail', path) == [], \
+ 'if branching in last alternative and previous did not match, return `[]` (not `default`)'
+ assert traverse_obj({0: 'x'}, 0, path) == 'x', \
+ 'if branching in last alternative and previous did match, return single value'
+ assert traverse_obj({0: 'x'}, path, 0) == 'x', \
+ 'if branching in first alternative and non-branching path does match, return single value'
+ assert traverse_obj({}, path, 'fail') is None, \
+ 'if branching in first alternative and non-branching path does not match, return `default`'
+
+ def test_traversal_expected_type(self):
+ _EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0}
+
+ assert traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str) == 'str', \
+ 'accept matching `expected_type` type'
+ assert traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int) is None, \
+ 'reject non matching `expected_type` type'
+ assert traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)) == '0', \
+ 'transform type using type function'
+ assert traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=lambda _: 1 / 0) is None, \
+ 'wrap expected_type function in try_call'
+ assert traverse_obj(_EXPECTED_TYPE_DATA, ..., expected_type=str) == ['str'], \
+ 'eliminate items that expected_type fails on'
+ assert traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int) == {0: 100}, \
+ 'type as expected_type should filter dict values'
+ assert traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none) == {0: '100', 1: '1.2'}, \
+ 'function as expected_type should transform dict values'
+ assert traverse_obj(_TEST_DATA, ({0: 1.2}, 0, {int_or_none}), expected_type=int) == 1, \
+ 'expected_type should not filter non final dict values'
+ assert traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int) == {0: {0: 100}}, \
+ 'expected_type should transform deep dict values'
+ assert traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(...)) == [{0: ...}, {0: ...}], \
+ 'expected_type should transform branched dict values'
+ assert traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int) == [4], \
+ 'expected_type regression for type matching in tuple branching'
+ assert traverse_obj(_TEST_DATA, ['data', ...], expected_type=int) == [], \
+ 'expected_type regression for type matching in dict result'
+
+ def test_traversal_get_all(self):
+ _GET_ALL_DATA = {'key': [0, 1, 2]}
+
+ assert traverse_obj(_GET_ALL_DATA, ('key', ...), get_all=False) == 0, \
+ 'if not `get_all`, return only first matching value'
+ assert traverse_obj(_GET_ALL_DATA, ..., get_all=False) == [0, 1, 2], \
+ 'do not overflatten if not `get_all`'
+
+ def test_traversal_casesense(self):
+ _CASESENSE_DATA = {
+ 'KeY': 'value0',
+ 0: {
+ 'KeY': 'value1',
+ 0: {'KeY': 'value2'},
+ },
+ }
+
+ assert traverse_obj(_CASESENSE_DATA, 'key') is None, \
+ 'dict keys should be case sensitive by default'
+ assert traverse_obj(_CASESENSE_DATA, 'keY', casesense=False) == 'value0', \
+ 'allow non-matching key case if not `casesense`'
+ assert traverse_obj(_CASESENSE_DATA, [0, ('keY',)], casesense=False) == ['value1'], \
+ 'allow non-matching key case in branch if not `casesense`'
+ assert traverse_obj(_CASESENSE_DATA, [0, ([0, 'keY'],)], casesense=False) == ['value2'], \
+ 'allow non-matching key case in branch path if not `casesense`'
+
+ def test_traversal_traverse_string(self):
+ _TRAVERSE_STRING_DATA = {'str': 'str', 1.2: 1.2}
+
+ assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0)) is None, \
+ 'do not traverse into string if not `traverse_string`'
+ assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0), traverse_string=True) == 's', \
+ 'traverse into string if `traverse_string`'
+ assert traverse_obj(_TRAVERSE_STRING_DATA, (1.2, 1), traverse_string=True) == '.', \
+ 'traverse into converted data if `traverse_string`'
+ assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', ...), traverse_string=True) == 'str', \
+ '`...` should result in string (same value) if `traverse_string`'
+ assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)), traverse_string=True) == 'sr', \
+ '`slice` should result in string if `traverse_string`'
+ assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == "s"), traverse_string=True) == 'str', \
+ 'function should result in string if `traverse_string`'
+ assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)), traverse_string=True) == ['s', 'r'], \
+ 'branching should result in list if `traverse_string`'
+ assert traverse_obj({}, (0, ...), traverse_string=True) == [], \
+ 'branching should result in list if `traverse_string`'
+ assert traverse_obj({}, (0, lambda x, y: True), traverse_string=True) == [], \
+ 'branching should result in list if `traverse_string`'
+ assert traverse_obj({}, (0, slice(1)), traverse_string=True) == [], \
+ 'branching should result in list if `traverse_string`'
+
+ def test_traversal_re(self):
+ mobj = re.fullmatch(r'0(12)(?P<group>3)(4)?', '0123')
+ assert traverse_obj(mobj, ...) == [x for x in mobj.groups() if x is not None], \
+ '`...` on a `re.Match` should give its `groups()`'
+ assert traverse_obj(mobj, lambda k, _: k in (0, 2)) == ['0123', '3'], \
+ 'function on a `re.Match` should be given (group number, value) pairs, starting at 0'
+ assert traverse_obj(mobj, 'group') == '3', \
+ 'str key on a `re.Match` should give group with that name'
+ assert traverse_obj(mobj, 2) == '3', \
+ 'int key on a `re.Match` should give the group with that index'
+ assert traverse_obj(mobj, 'gRoUp', casesense=False) == '3', \
+ 'str key on a `re.Match` should respect casesense'
+ assert traverse_obj(mobj, 'fail') is None, \
+ 'failing str key on a `re.Match` should return `default`'
+ assert traverse_obj(mobj, 'gRoUpS', casesense=False) is None, \
+ 'failing str key on a `re.Match` should return `default`'
+ assert traverse_obj(mobj, 8) is None, \
+ 'failing int key on a `re.Match` should return `default`'
+ assert traverse_obj(mobj, lambda k, _: k in (0, 'group')) == ['0123', '3'], \
+ 'function on a `re.Match` should give group name as well'
+
+ def test_traversal_xml_etree(self):
+ etree = xml.etree.ElementTree.fromstring('''<?xml version="1.0"?>
+ <data>
+ <country name="Liechtenstein">
+ <rank>1</rank>
+ <year>2008</year>
+ <gdppc>141100</gdppc>
+ <neighbor name="Austria" direction="E"/>
+ <neighbor name="Switzerland" direction="W"/>
+ </country>
+ <country name="Singapore">
+ <rank>4</rank>
+ <year>2011</year>
+ <gdppc>59900</gdppc>
+ <neighbor name="Malaysia" direction="N"/>
+ </country>
+ <country name="Panama">
+ <rank>68</rank>
+ <year>2011</year>
+ <gdppc>13600</gdppc>
+ <neighbor name="Costa Rica" direction="W"/>
+ <neighbor name="Colombia" direction="E"/>
+ </country>
+ </data>''')
+ assert traverse_obj(etree, '') == etree, \
+ 'empty str key should return the element itself'
+ assert traverse_obj(etree, 'country') == list(etree), \
+ 'str key should list all children with that tag name'
+ assert traverse_obj(etree, ...) == list(etree), \
+ '`...` as key should return all children'
+ assert traverse_obj(etree, lambda _, x: x[0].text == '4') == [etree[1]], \
+ 'function as key should get element as value'
+ assert traverse_obj(etree, lambda i, _: i == 1) == [etree[1]], \
+ 'function as key should get index as key'
+ assert traverse_obj(etree, 0) == etree[0], \
+ 'int key should return the nth child'
+ expected = ['Austria', 'Switzerland', 'Malaysia', 'Costa Rica', 'Colombia']
+ assert traverse_obj(etree, './/neighbor/@name') == expected, \
+ '`@<attribute>` at end of path should give that attribute'
+ assert traverse_obj(etree, '//neighbor/@fail') == [None, None, None, None, None], \
+ '`@<nonexistent>` at end of path should give `None`'
+ assert traverse_obj(etree, ('//neighbor/@', 2)) == {'name': 'Malaysia', 'direction': 'N'}, \
+ '`@` should give the full attribute dict'
+ assert traverse_obj(etree, '//year/text()') == ['2008', '2011', '2011'], \
+ '`text()` at end of path should give the inner text'
+ assert traverse_obj(etree, '//*[@direction]/@direction') == ['E', 'W', 'N', 'W', 'E'], \
+ 'full Python xpath features should be supported'
+ assert traverse_obj(etree, (0, '@name')) == 'Liechtenstein', \
+ 'special transformations should act on current element'
+ assert traverse_obj(etree, ('country', 0, ..., 'text()', {int_or_none})) == [1, 2008, 141100], \
+ 'special transformations should act on current element'
+
+ def test_traversal_unbranching(self):
+ assert traverse_obj(_TEST_DATA, [(100, 1.2), all]) == [100, 1.2], \
+ '`all` should give all results as list'
+ assert traverse_obj(_TEST_DATA, [(100, 1.2), any]) == 100, \
+ '`any` should give the first result'
+ assert traverse_obj(_TEST_DATA, [100, all]) == [100], \
+ '`all` should give list if non branching'
+ assert traverse_obj(_TEST_DATA, [100, any]) == 100, \
+ '`any` should give single item if non branching'
+ assert traverse_obj(_TEST_DATA, [('dict', 'None', 100), all]) == [100], \
+ '`all` should filter `None` and empty dict'
+ assert traverse_obj(_TEST_DATA, [('dict', 'None', 100), any]) == 100, \
+ '`any` should filter `None` and empty dict'
+ assert traverse_obj(_TEST_DATA, [{
+ 'all': [('dict', 'None', 100, 1.2), all],
+ 'any': [('dict', 'None', 100, 1.2), any],
+ }]) == {'all': [100, 1.2], 'any': 100}, \
+ '`all`/`any` should apply to each dict path separately'
+ assert traverse_obj(_TEST_DATA, [{
+ 'all': [('dict', 'None', 100, 1.2), all],
+ 'any': [('dict', 'None', 100, 1.2), any],
+ }], get_all=False) == {'all': [100, 1.2], 'any': 100}, \
+ '`all`/`any` should apply to dict regardless of `get_all`'
+ assert traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, {float}]) is None, \
+ '`all` should reset branching status'
+ assert traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), any, {float}]) is None, \
+ '`any` should reset branching status'
+ assert traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, ..., {float}]) == [1.2], \
+ '`all` should allow further branching'
+ assert traverse_obj(_TEST_DATA, [('dict', 'None', 'urls', 'data'), any, ..., 'index']) == [0, 1], \
+ '`any` should allow further branching'
+
+ def test_traversal_morsel(self):
+ values = {
+ 'expires': 'a',
+ 'path': 'b',
+ 'comment': 'c',
+ 'domain': 'd',
+ 'max-age': 'e',
+ 'secure': 'f',
+ 'httponly': 'g',
+ 'version': 'h',
+ 'samesite': 'i',
+ }
+ morsel = http.cookies.Morsel()
+ morsel.set('item_key', 'item_value', 'coded_value')
+ morsel.update(values)
+ values['key'] = 'item_key'
+ values['value'] = 'item_value'
+
+ for key, value in values.items():
+ assert traverse_obj(morsel, key) == value, \
+ 'Morsel should provide access to all values'
+ assert traverse_obj(morsel, ...) == list(values.values()), \
+ '`...` should yield all values'
+ assert traverse_obj(morsel, lambda k, v: True) == list(values.values()), \
+ 'function key should yield all values'
+ assert traverse_obj(morsel, [(None,), any]) == morsel, \
+ 'Morsel should not be implicitly changed to dict on usage'
+
+
+class TestDictGet:
+ def test_dict_get(self):
+ FALSE_VALUES = {
+ 'none': None,
+ 'false': False,
+ 'zero': 0,
+ 'empty_string': '',
+ 'empty_list': [],
+ }
+ d = {**FALSE_VALUES, 'a': 42}
+ assert dict_get(d, 'a') == 42
+ assert dict_get(d, 'b') is None
+ assert dict_get(d, 'b', 42) == 42
+ assert dict_get(d, ('a',)) == 42
+ assert dict_get(d, ('b', 'a')) == 42
+ assert dict_get(d, ('b', 'c', 'a', 'd')) == 42
+ assert dict_get(d, ('b', 'c')) is None
+ assert dict_get(d, ('b', 'c'), 42) == 42
+ for key, false_value in FALSE_VALUES.items():
+ assert dict_get(d, ('b', 'c', key)) is None
+ assert dict_get(d, ('b', 'c', key), skip_false_values=False) == false_value
diff --git a/test/test_utils.py b/test/test_utils.py
index a3073f0..ddf0a7c 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -2,7 +2,6 @@
# Allow direct execution
import os
-import re
import sys
import unittest
import warnings
@@ -45,7 +44,6 @@ from yt_dlp.utils import (
determine_ext,
determine_file_encoding,
dfxp2srt,
- dict_get,
encode_base_n,
encode_compat_str,
encodeFilename,
@@ -106,13 +104,11 @@ from yt_dlp.utils import (
sanitize_url,
shell_quote,
smuggle_url,
- str_or_none,
str_to_int,
strip_jsonp,
strip_or_none,
subtitles_filename,
timeconvert,
- traverse_obj,
try_call,
unescapeHTML,
unified_strdate,
@@ -755,28 +751,6 @@ class TestUtil(unittest.TestCase):
self.assertRaises(
ValueError, multipart_encode, {b'field': b'value'}, boundary='value')
- def test_dict_get(self):
- FALSE_VALUES = {
- 'none': None,
- 'false': False,
- 'zero': 0,
- 'empty_string': '',
- 'empty_list': [],
- }
- d = FALSE_VALUES.copy()
- d['a'] = 42
- self.assertEqual(dict_get(d, 'a'), 42)
- self.assertEqual(dict_get(d, 'b'), None)
- self.assertEqual(dict_get(d, 'b', 42), 42)
- self.assertEqual(dict_get(d, ('a', )), 42)
- self.assertEqual(dict_get(d, ('b', 'a', )), 42)
- self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
- self.assertEqual(dict_get(d, ('b', 'c', )), None)
- self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
- for key, false_value in FALSE_VALUES.items():
- self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
- self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
-
def test_merge_dicts(self):
self.assertEqual(merge_dicts({'a': 1}, {'b': 2}), {'a': 1, 'b': 2})
self.assertEqual(merge_dicts({'a': 1}, {'a': 2}), {'a': 1})
@@ -2039,359 +2013,6 @@ Line 1
warnings.simplefilter('ignore')
self.assertEqual(variadic('spam', allowed_types=[dict]), 'spam')
- def test_traverse_obj(self):
- _TEST_DATA = {
- 100: 100,
- 1.2: 1.2,
- 'str': 'str',
- 'None': None,
- '...': ...,
- 'urls': [
- {'index': 0, 'url': 'https://www.example.com/0'},
- {'index': 1, 'url': 'https://www.example.com/1'},
- ],
- 'data': (
- {'index': 2},
- {'index': 3},
- ),
- 'dict': {},
- }
-
- # Test base functionality
- self.assertEqual(traverse_obj(_TEST_DATA, ('str',)), 'str',
- msg='allow tuple path')
- self.assertEqual(traverse_obj(_TEST_DATA, ['str']), 'str',
- msg='allow list path')
- self.assertEqual(traverse_obj(_TEST_DATA, (value for value in ("str",))), 'str',
- msg='allow iterable path')
- self.assertEqual(traverse_obj(_TEST_DATA, 'str'), 'str',
- msg='single items should be treated as a path')
- self.assertEqual(traverse_obj(_TEST_DATA, None), _TEST_DATA)
- self.assertEqual(traverse_obj(_TEST_DATA, 100), 100)
- self.assertEqual(traverse_obj(_TEST_DATA, 1.2), 1.2)
-
- # Test Ellipsis behavior
- self.assertCountEqual(traverse_obj(_TEST_DATA, ...),
- (item for item in _TEST_DATA.values() if item not in (None, {})),
- msg='`...` should give all non discarded values')
- self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', 0, ...)), _TEST_DATA['urls'][0].values(),
- msg='`...` selection for dicts should select all values')
- self.assertEqual(traverse_obj(_TEST_DATA, (..., ..., 'url')),
- ['https://www.example.com/0', 'https://www.example.com/1'],
- msg='nested `...` queries should work')
- self.assertCountEqual(traverse_obj(_TEST_DATA, (..., ..., 'index')), range(4),
- msg='`...` query result should be flattened')
- self.assertEqual(traverse_obj(iter(range(4)), ...), list(range(4)),
- msg='`...` should accept iterables')
-
- # Test function as key
- self.assertEqual(traverse_obj(_TEST_DATA, lambda x, y: x == 'urls' and isinstance(y, list)),
- [_TEST_DATA['urls']],
- msg='function as query key should perform a filter based on (key, value)')
- self.assertCountEqual(traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)), {'str'},
- msg='exceptions in the query function should be catched')
- self.assertEqual(traverse_obj(iter(range(4)), lambda _, x: x % 2 == 0), [0, 2],
- msg='function key should accept iterables')
- if __debug__:
- with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
- traverse_obj(_TEST_DATA, lambda a: ...)
- with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
- traverse_obj(_TEST_DATA, lambda a, b, c: ...)
-
- # Test set as key (transformation/type, like `expected_type`)
- self.assertEqual(traverse_obj(_TEST_DATA, (..., {str.upper}, )), ['STR'],
- msg='Function in set should be a transformation')
- self.assertEqual(traverse_obj(_TEST_DATA, (..., {str})), ['str'],
- msg='Type in set should be a type filter')
- self.assertEqual(traverse_obj(_TEST_DATA, {dict}), _TEST_DATA,
- msg='A single set should be wrapped into a path')
- self.assertEqual(traverse_obj(_TEST_DATA, (..., {str.upper})), ['STR'],
- msg='Transformation function should not raise')
- self.assertEqual(traverse_obj(_TEST_DATA, (..., {str_or_none})),
- [item for item in map(str_or_none, _TEST_DATA.values()) if item is not None],
- msg='Function in set should be a transformation')
- self.assertEqual(traverse_obj(_TEST_DATA, ('fail', {lambda _: 'const'})), 'const',
- msg='Function in set should always be called')
- if __debug__:
- with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
- traverse_obj(_TEST_DATA, set())
- with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
- traverse_obj(_TEST_DATA, {str.upper, str})
-
- # Test `slice` as a key
- _SLICE_DATA = [0, 1, 2, 3, 4]
- self.assertEqual(traverse_obj(_TEST_DATA, ('dict', slice(1))), None,
- msg='slice on a dictionary should not throw')
- self.assertEqual(traverse_obj(_SLICE_DATA, slice(1)), _SLICE_DATA[:1],
- msg='slice key should apply slice to sequence')
- self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 2)), _SLICE_DATA[1:2],
- msg='slice key should apply slice to sequence')
- self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 4, 2)), _SLICE_DATA[1:4:2],
- msg='slice key should apply slice to sequence')
-
- # Test alternative paths
- self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'str'), 'str',
- msg='multiple `paths` should be treated as alternative paths')
- self.assertEqual(traverse_obj(_TEST_DATA, 'str', 100), 'str',
- msg='alternatives should exit early')
- self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'fail'), None,
- msg='alternatives should return `default` if exhausted')
- self.assertEqual(traverse_obj(_TEST_DATA, (..., 'fail'), 100), 100,
- msg='alternatives should track their own branching return')
- self.assertEqual(traverse_obj(_TEST_DATA, ('dict', ...), ('data', ...)), list(_TEST_DATA['data']),
- msg='alternatives on empty objects should search further')
-
- # Test branch and path nesting
- self.assertEqual(traverse_obj(_TEST_DATA, ('urls', (3, 0), 'url')), ['https://www.example.com/0'],
- msg='tuple as key should be treated as branches')
- self.assertEqual(traverse_obj(_TEST_DATA, ('urls', [3, 0], 'url')), ['https://www.example.com/0'],
- msg='list as key should be treated as branches')
- self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ((1, 'fail'), (0, 'url')))), ['https://www.example.com/0'],
- msg='double nesting in path should be treated as paths')
- self.assertEqual(traverse_obj(['0', [1, 2]], [(0, 1), 0]), [1],
- msg='do not fail early on branching')
- self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', ((1, ('fail', 'url')), (0, 'url')))),
- ['https://www.example.com/0', 'https://www.example.com/1'],
- msg='tripple nesting in path should be treated as branches')
- self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ('fail', (..., 'url')))),
- ['https://www.example.com/0', 'https://www.example.com/1'],
- msg='ellipsis as branch path start gets flattened')
-
- # Test dictionary as key
- self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}), {0: 100, 1: 1.2},
- msg='dict key should result in a dict with the same keys')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', 0, 'url')}),
- {0: 'https://www.example.com/0'},
- msg='dict key should allow paths')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', (3, 0), 'url')}),
- {0: ['https://www.example.com/0']},
- msg='tuple in dict path should be treated as branches')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, 'fail'), (0, 'url')))}),
- {0: ['https://www.example.com/0']},
- msg='double nesting in dict path should be treated as paths')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, ('fail', 'url')), (0, 'url')))}),
- {0: ['https://www.example.com/1', 'https://www.example.com/0']},
- msg='tripple nesting in dict path should be treated as branches')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}), {},
- msg='remove `None` values when top level dict key fails')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}, default=...), {0: ...},
- msg='use `default` if key fails and `default`')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}), {},
- msg='remove empty values when dict key')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}, default=...), {0: ...},
- msg='use `default` when dict key and `default`')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}), {},
- msg='remove empty values when nested dict key fails')
- self.assertEqual(traverse_obj(None, {0: 'fail'}), {},
- msg='default to dict if pruned')
- self.assertEqual(traverse_obj(None, {0: 'fail'}, default=...), {0: ...},
- msg='default to dict if pruned and default is given')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}, default=...), {0: {0: ...}},
- msg='use nested `default` when nested dict key fails and `default`')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: ('dict', ...)}), {},
- msg='remove key if branch in dict key not successful')
-
- # Testing default parameter behavior
- _DEFAULT_DATA = {'None': None, 'int': 0, 'list': []}
- self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail'), None,
- msg='default value should be `None`')
- self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', 'fail', default=...), ...,
- msg='chained fails should result in default')
- self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', 'int'), 0,
- msg='should not short cirquit on `None`')
- self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', default=1), 1,
- msg='invalid dict key should result in `default`')
- self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', default=1), 1,
- msg='`None` is a deliberate sentinel and should become `default`')
- self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', 10)), None,
- msg='`IndexError` should result in `default`')
- self.assertEqual(traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=1), 1,
- msg='if branched but not successful return `default` if defined, not `[]`')
- self.assertEqual(traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=None), None,
- msg='if branched but not successful return `default` even if `default` is `None`')
- self.assertEqual(traverse_obj(_DEFAULT_DATA, (..., 'fail')), [],
- msg='if branched but not successful return `[]`, not `default`')
- self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', ...)), [],
- msg='if branched but object is empty return `[]`, not `default`')
- self.assertEqual(traverse_obj(None, ...), [],
- msg='if branched but object is `None` return `[]`, not `default`')
- self.assertEqual(traverse_obj({0: None}, (0, ...)), [],
- msg='if branched but state is `None` return `[]`, not `default`')
-
- branching_paths = [
- ('fail', ...),
- (..., 'fail'),
- 100 * ('fail',) + (...,),
- (...,) + 100 * ('fail',),
- ]
- for branching_path in branching_paths:
- self.assertEqual(traverse_obj({}, branching_path), [],
- msg='if branched but state is `None`, return `[]` (not `default`)')
- self.assertEqual(traverse_obj({}, 'fail', branching_path), [],
- msg='if branching in last alternative and previous did not match, return `[]` (not `default`)')
- self.assertEqual(traverse_obj({0: 'x'}, 0, branching_path), 'x',
- msg='if branching in last alternative and previous did match, return single value')
- self.assertEqual(traverse_obj({0: 'x'}, branching_path, 0), 'x',
- msg='if branching in first alternative and non-branching path does match, return single value')
- self.assertEqual(traverse_obj({}, branching_path, 'fail'), None,
- msg='if branching in first alternative and non-branching path does not match, return `default`')
-
- # Testing expected_type behavior
- _EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0}
- self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str),
- 'str', msg='accept matching `expected_type` type')
- self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int),
- None, msg='reject non matching `expected_type` type')
- self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)),
- '0', msg='transform type using type function')
- self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=lambda _: 1 / 0),
- None, msg='wrap expected_type fuction in try_call')
- self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, ..., expected_type=str),
- ['str'], msg='eliminate items that expected_type fails on')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int),
- {0: 100}, msg='type as expected_type should filter dict values')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none),
- {0: '100', 1: '1.2'}, msg='function as expected_type should transform dict values')
- self.assertEqual(traverse_obj(_TEST_DATA, ({0: 1.2}, 0, {int_or_none}), expected_type=int),
- 1, msg='expected_type should not filter non final dict values')
- self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int),
- {0: {0: 100}}, msg='expected_type should transform deep dict values')
- self.assertEqual(traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(...)),
- [{0: ...}, {0: ...}], msg='expected_type should transform branched dict values')
- self.assertEqual(traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int),
- [4], msg='expected_type regression for type matching in tuple branching')
- self.assertEqual(traverse_obj(_TEST_DATA, ['data', ...], expected_type=int),
- [], msg='expected_type regression for type matching in dict result')
-
- # Test get_all behavior
- _GET_ALL_DATA = {'key': [0, 1, 2]}
- self.assertEqual(traverse_obj(_GET_ALL_DATA, ('key', ...), get_all=False), 0,
- msg='if not `get_all`, return only first matching value')
- self.assertEqual(traverse_obj(_GET_ALL_DATA, ..., get_all=False), [0, 1, 2],
- msg='do not overflatten if not `get_all`')
-
- # Test casesense behavior
- _CASESENSE_DATA = {
- 'KeY': 'value0',
- 0: {
- 'KeY': 'value1',
- 0: {'KeY': 'value2'},
- },
- }
- self.assertEqual(traverse_obj(_CASESENSE_DATA, 'key'), None,
- msg='dict keys should be case sensitive unless `casesense`')
- self.assertEqual(traverse_obj(_CASESENSE_DATA, 'keY',
- casesense=False), 'value0',
- msg='allow non matching key case if `casesense`')
- self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ('keY',)),
- casesense=False), ['value1'],
- msg='allow non matching key case in branch if `casesense`')
- self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ((0, 'keY'),)),
- casesense=False), ['value2'],
- msg='allow non matching key case in branch path if `casesense`')
-
- # Test traverse_string behavior
- _TRAVERSE_STRING_DATA = {'str': 'str', 1.2: 1.2}
- self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0)), None,
- msg='do not traverse into string if not `traverse_string`')
- self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0),
- traverse_string=True), 's',
- msg='traverse into string if `traverse_string`')
- self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, (1.2, 1),
- traverse_string=True), '.',
- msg='traverse into converted data if `traverse_string`')
- self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', ...),
- traverse_string=True), 'str',
- msg='`...` should result in string (same value) if `traverse_string`')
- self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)),
- traverse_string=True), 'sr',
- msg='`slice` should result in string if `traverse_string`')
- self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == "s"),
- traverse_string=True), 'str',
- msg='function should result in string if `traverse_string`')
- self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)),
- traverse_string=True), ['s', 'r'],
- msg='branching should result in list if `traverse_string`')
- self.assertEqual(traverse_obj({}, (0, ...), traverse_string=True), [],
- msg='branching should result in list if `traverse_string`')
- self.assertEqual(traverse_obj({}, (0, lambda x, y: True), traverse_string=True), [],
- msg='branching should result in list if `traverse_string`')
- self.assertEqual(traverse_obj({}, (0, slice(1)), traverse_string=True), [],
- msg='branching should result in list if `traverse_string`')
-
- # Test re.Match as input obj
- mobj = re.fullmatch(r'0(12)(?P<group>3)(4)?', '0123')
- self.assertEqual(traverse_obj(mobj, ...), [x for x in mobj.groups() if x is not None],
- msg='`...` on a `re.Match` should give its `groups()`')
- self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 2)), ['0123', '3'],
- msg='function on a `re.Match` should give groupno, value starting at 0')
- self.assertEqual(traverse_obj(mobj, 'group'), '3',
- msg='str key on a `re.Match` should give group with that name')
- self.assertEqual(traverse_obj(mobj, 2), '3',
- msg='int key on a `re.Match` should give group with that name')
- self.assertEqual(traverse_obj(mobj, 'gRoUp', casesense=False), '3',
- msg='str key on a `re.Match` should respect casesense')
- self.assertEqual(traverse_obj(mobj, 'fail'), None,
- msg='failing str key on a `re.Match` should return `default`')
- self.assertEqual(traverse_obj(mobj, 'gRoUpS', casesense=False), None,
- msg='failing str key on a `re.Match` should return `default`')
- self.assertEqual(traverse_obj(mobj, 8), None,
- msg='failing int key on a `re.Match` should return `default`')
- self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 'group')), ['0123', '3'],
- msg='function on a `re.Match` should give group name as well')
-
- # Test xml.etree.ElementTree.Element as input obj
- etree = xml.etree.ElementTree.fromstring('''<?xml version="1.0"?>
- <data>
- <country name="Liechtenstein">
- <rank>1</rank>
- <year>2008</year>
- <gdppc>141100</gdppc>
- <neighbor name="Austria" direction="E"/>
- <neighbor name="Switzerland" direction="W"/>
- </country>
- <country name="Singapore">
- <rank>4</rank>
- <year>2011</year>
- <gdppc>59900</gdppc>
- <neighbor name="Malaysia" direction="N"/>
- </country>
- <country name="Panama">
- <rank>68</rank>
- <year>2011</year>
- <gdppc>13600</gdppc>
- <neighbor name="Costa Rica" direction="W"/>
- <neighbor name="Colombia" direction="E"/>
- </country>
- </data>''')
- self.assertEqual(traverse_obj(etree, ''), etree,
- msg='empty str key should return the element itself')
- self.assertEqual(traverse_obj(etree, 'country'), list(etree),
- msg='str key should lead all children with that tag name')
- self.assertEqual(traverse_obj(etree, ...), list(etree),
- msg='`...` as key should return all children')
- self.assertEqual(traverse_obj(etree, lambda _, x: x[0].text == '4'), [etree[1]],
- msg='function as key should get element as value')
- self.assertEqual(traverse_obj(etree, lambda i, _: i == 1), [etree[1]],
- msg='function as key should get index as key')
- self.assertEqual(traverse_obj(etree, 0), etree[0],
- msg='int key should return the nth child')
- self.assertEqual(traverse_obj(etree, './/neighbor/@name'),
- ['Austria', 'Switzerland', 'Malaysia', 'Costa Rica', 'Colombia'],
- msg='`@<attribute>` at end of path should give that attribute')
- self.assertEqual(traverse_obj(etree, '//neighbor/@fail'), [None, None, None, None, None],
- msg='`@<nonexistant>` at end of path should give `None`')
- self.assertEqual(traverse_obj(etree, ('//neighbor/@', 2)), {'name': 'Malaysia', 'direction': 'N'},
- msg='`@` should give the full attribute dict')
- self.assertEqual(traverse_obj(etree, '//year/text()'), ['2008', '2011', '2011'],
- msg='`text()` at end of path should give the inner text')
- self.assertEqual(traverse_obj(etree, '//*[@direction]/@direction'), ['E', 'W', 'N', 'W', 'E'],
- msg='full Python xpath features should be supported')
- self.assertEqual(traverse_obj(etree, (0, '@name')), 'Liechtenstein',
- msg='special transformations should act on current element')
- self.assertEqual(traverse_obj(etree, ('country', 0, ..., 'text()', {int_or_none})), [1, 2008, 141100],
- msg='special transformations should act on current element')
-
def test_http_header_dict(self):
headers = HTTPHeaderDict()
headers['ytdl-test'] = b'0'
@@ -2448,6 +2069,10 @@ Line 1
# Test escaping
assert run_shell(['echo', 'test"&']) == '"test""&"\n'
+ assert run_shell(['echo', '%CMDCMDLINE:~-1%&']) == '"%CMDCMDLINE:~-1%&"\n'
+ assert run_shell(['echo', 'a\nb']) == '"a"\n"b"\n'
+ assert run_shell(['echo', '"']) == '""""\n'
+ assert run_shell(['echo', '\\']) == '\\\n'
# Test if delayed expansion is disabled
assert run_shell(['echo', '^!']) == '"^!"\n'
assert run_shell('echo "^!"') == '"^!"\n'
diff --git a/test/test_websockets.py b/test/test_websockets.py
index 13b3a1e..b294b09 100644
--- a/test/test_websockets.py
+++ b/test/test_websockets.py
@@ -32,8 +32,6 @@ from yt_dlp.networking.exceptions import (
)
from yt_dlp.utils.networking import HTTPHeaderDict
-from test.conftest import validate_and_send
-
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -66,7 +64,9 @@ def process_request(self, request):
def create_websocket_server(**ws_kwargs):
import websockets.sync.server
- wsd = websockets.sync.server.serve(websocket_handler, '127.0.0.1', 0, process_request=process_request, **ws_kwargs)
+ wsd = websockets.sync.server.serve(
+ websocket_handler, '127.0.0.1', 0,
+ process_request=process_request, open_timeout=2, **ws_kwargs)
ws_port = wsd.socket.getsockname()[1]
ws_server_thread = threading.Thread(target=wsd.serve_forever)
ws_server_thread.daemon = True
@@ -100,6 +100,19 @@ def create_mtls_wss_websocket_server():
return create_websocket_server(ssl_context=sslctx)
+def ws_validate_and_send(rh, req):
+ rh.validate(req)
+ max_tries = 3
+ for i in range(max_tries):
+ try:
+ return rh.send(req)
+ except TransportError as e:
+ if i < (max_tries - 1) and 'connection closed during handshake' in str(e):
+ # websockets server sometimes hangs on new connections
+ continue
+ raise
+
+
@pytest.mark.skipif(not websockets, reason='websockets must be installed to test websocket request handlers')
class TestWebsSocketRequestHandlerConformance:
@classmethod
@@ -119,7 +132,7 @@ class TestWebsSocketRequestHandlerConformance:
@pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
def test_basic_websockets(self, handler):
with handler() as rh:
- ws = validate_and_send(rh, Request(self.ws_base_url))
+ ws = ws_validate_and_send(rh, Request(self.ws_base_url))
assert 'upgrade' in ws.headers
assert ws.status == 101
ws.send('foo')
@@ -131,7 +144,7 @@ class TestWebsSocketRequestHandlerConformance:
@pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
def test_send_types(self, handler, msg, opcode):
with handler() as rh:
- ws = validate_and_send(rh, Request(self.ws_base_url))
+ ws = ws_validate_and_send(rh, Request(self.ws_base_url))
ws.send(msg)
assert int(ws.recv()) == opcode
ws.close()
@@ -140,10 +153,10 @@ class TestWebsSocketRequestHandlerConformance:
def test_verify_cert(self, handler):
with handler() as rh:
with pytest.raises(CertificateVerifyError):
- validate_and_send(rh, Request(self.wss_base_url))
+ ws_validate_and_send(rh, Request(self.wss_base_url))
with handler(verify=False) as rh:
- ws = validate_and_send(rh, Request(self.wss_base_url))
+ ws = ws_validate_and_send(rh, Request(self.wss_base_url))
assert ws.status == 101
ws.close()
@@ -151,7 +164,7 @@ class TestWebsSocketRequestHandlerConformance:
def test_ssl_error(self, handler):
with handler(verify=False) as rh:
with pytest.raises(SSLError, match=r'ssl(?:v3|/tls) alert handshake failure') as exc_info:
- validate_and_send(rh, Request(self.bad_wss_host))
+ ws_validate_and_send(rh, Request(self.bad_wss_host))
assert not issubclass(exc_info.type, CertificateVerifyError)
@pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
@@ -163,7 +176,7 @@ class TestWebsSocketRequestHandlerConformance:
])
def test_percent_encode(self, handler, path, expected):
with handler() as rh:
- ws = validate_and_send(rh, Request(f'{self.ws_base_url}{path}'))
+ ws = ws_validate_and_send(rh, Request(f'{self.ws_base_url}{path}'))
ws.send('path')
assert ws.recv() == expected
assert ws.status == 101
@@ -174,7 +187,7 @@ class TestWebsSocketRequestHandlerConformance:
with handler() as rh:
# This isn't a comprehensive test,
# but it should be enough to check whether the handler is removing dot segments
- ws = validate_and_send(rh, Request(f'{self.ws_base_url}/a/b/./../../test'))
+ ws = ws_validate_and_send(rh, Request(f'{self.ws_base_url}/a/b/./../../test'))
assert ws.status == 101
ws.send('path')
assert ws.recv() == '/test'
@@ -187,7 +200,7 @@ class TestWebsSocketRequestHandlerConformance:
def test_raise_http_error(self, handler, status):
with handler() as rh:
with pytest.raises(HTTPError) as exc_info:
- validate_and_send(rh, Request(f'{self.ws_base_url}/gen_{status}'))
+ ws_validate_and_send(rh, Request(f'{self.ws_base_url}/gen_{status}'))
assert exc_info.value.status == status
@pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
@@ -198,7 +211,7 @@ class TestWebsSocketRequestHandlerConformance:
def test_timeout(self, handler, params, extensions):
with handler(**params) as rh:
with pytest.raises(TransportError):
- validate_and_send(rh, Request(self.ws_base_url, extensions=extensions))
+ ws_validate_and_send(rh, Request(self.ws_base_url, extensions=extensions))
@pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
def test_cookies(self, handler):
@@ -210,18 +223,18 @@ class TestWebsSocketRequestHandlerConformance:
comment_url=None, rest={}))
with handler(cookiejar=cookiejar) as rh:
- ws = validate_and_send(rh, Request(self.ws_base_url))
+ ws = ws_validate_and_send(rh, Request(self.ws_base_url))
ws.send('headers')
assert json.loads(ws.recv())['cookie'] == 'test=ytdlp'
ws.close()
with handler() as rh:
- ws = validate_and_send(rh, Request(self.ws_base_url))
+ ws = ws_validate_and_send(rh, Request(self.ws_base_url))
ws.send('headers')
assert 'cookie' not in json.loads(ws.recv())
ws.close()
- ws = validate_and_send(rh, Request(self.ws_base_url, extensions={'cookiejar': cookiejar}))
+ ws = ws_validate_and_send(rh, Request(self.ws_base_url, extensions={'cookiejar': cookiejar}))
ws.send('headers')
assert json.loads(ws.recv())['cookie'] == 'test=ytdlp'
ws.close()
@@ -231,7 +244,7 @@ class TestWebsSocketRequestHandlerConformance:
source_address = f'127.0.0.{random.randint(5, 255)}'
verify_address_availability(source_address)
with handler(source_address=source_address) as rh:
- ws = validate_and_send(rh, Request(self.ws_base_url))
+ ws = ws_validate_and_send(rh, Request(self.ws_base_url))
ws.send('source_address')
assert source_address == ws.recv()
ws.close()
@@ -240,7 +253,7 @@ class TestWebsSocketRequestHandlerConformance:
def test_response_url(self, handler):
with handler() as rh:
url = f'{self.ws_base_url}/something'
- ws = validate_and_send(rh, Request(url))
+ ws = ws_validate_and_send(rh, Request(url))
assert ws.url == url
ws.close()
@@ -248,14 +261,14 @@ class TestWebsSocketRequestHandlerConformance:
def test_request_headers(self, handler):
with handler(headers=HTTPHeaderDict({'test1': 'test', 'test2': 'test2'})) as rh:
# Global Headers
- ws = validate_and_send(rh, Request(self.ws_base_url))
+ ws = ws_validate_and_send(rh, Request(self.ws_base_url))
ws.send('headers')
headers = HTTPHeaderDict(json.loads(ws.recv()))
assert headers['test1'] == 'test'
ws.close()
# Per request headers, merged with global
- ws = validate_and_send(rh, Request(
+ ws = ws_validate_and_send(rh, Request(
self.ws_base_url, headers={'test2': 'changed', 'test3': 'test3'}))
ws.send('headers')
headers = HTTPHeaderDict(json.loads(ws.recv()))
@@ -288,7 +301,7 @@ class TestWebsSocketRequestHandlerConformance:
verify=False,
client_cert=client_cert
) as rh:
- validate_and_send(rh, Request(self.mtls_wss_base_url)).close()
+ ws_validate_and_send(rh, Request(self.mtls_wss_base_url)).close()
def create_fake_ws_connection(raised):
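
All websocket tests above now go through the file-local ws_validate_and_send helper, which retries a connection that the test server drops during the handshake. A hedged sketch of a test method written against it inside the conformance class, reusing the existing handler fixture and ws_base_url attribute:

    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
    def test_example_upgrade(self, handler):
        with handler() as rh:
            # transient handshake failures are retried inside the helper
            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
            assert ws.status == 101
            ws.close()
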
diff --git a/yt_dlp/YoutubeDL.py b/yt_dlp/YoutubeDL.py
index c34d97b..9f730d0 100644
--- a/yt_dlp/YoutubeDL.py
+++ b/yt_dlp/YoutubeDL.py
@@ -1,7 +1,7 @@
import collections
import contextlib
import copy
-import datetime
+import datetime as dt
import errno
import fileinput
import http.cookiejar
@@ -25,7 +25,7 @@ import unicodedata
from .cache import Cache
from .compat import functools, urllib # isort: split
-from .compat import compat_os_name, compat_shlex_quote, urllib_req_to_req
+from .compat import compat_os_name, urllib_req_to_req
from .cookies import LenientSimpleCookie, load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
@@ -42,6 +42,7 @@ from .networking.exceptions import (
SSLError,
network_exceptions,
)
+from .networking.impersonate import ImpersonateRequestHandler
from .plugins import directories as plugin_directories
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
from .postprocessor import (
@@ -99,8 +100,8 @@ from .utils import (
SameFileError,
UnavailableVideoError,
UserNotLive,
+ YoutubeDLError,
age_restricted,
- args_to_str,
bug_reports_message,
date_from_str,
deprecation_warning,
@@ -139,11 +140,13 @@ from .utils import (
sanitize_filename,
sanitize_path,
sanitize_url,
+ shell_quote,
str_or_none,
strftime_or_none,
subtitles_filename,
supports_terminal_sequences,
system_identifier,
+ filesize_from_tbr,
timetuple_from_msec,
to_high_limit_path,
traverse_obj,
@@ -402,6 +405,8 @@ class YoutubeDL:
- "detect_or_warn": check whether we can do anything
about it, warn otherwise (default)
source_address: Client-side IP address to bind to.
+ impersonate: Client to impersonate for requests.
+ An ImpersonateTarget (from yt_dlp.networking.impersonate)
sleep_interval_requests: Number of seconds to sleep between requests
during extraction
sleep_interval: Number of seconds to sleep before each download when
@@ -476,7 +481,7 @@ class YoutubeDL:
nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
- external_downloader_args, concurrent_fragment_downloads.
+ external_downloader_args, concurrent_fragment_downloads, progress_delta.
The following options are used by the post processors:
ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
@@ -713,6 +718,13 @@ class YoutubeDL:
for msg in self.params.get('_deprecation_warnings', []):
self.deprecated_feature(msg)
+ if impersonate_target := self.params.get('impersonate'):
+ if not self._impersonate_target_available(impersonate_target):
+ raise YoutubeDLError(
+ f'Impersonate target "{impersonate_target}" is not available. '
+ f'Use --list-impersonate-targets to see available targets. '
+ f'You may be missing dependencies required to support this target.')
+
if 'list-formats' in self.params['compat_opts']:
self.params['listformats_table'] = False
@@ -811,7 +823,7 @@ class YoutubeDL:
self.report_warning(
'Long argument string detected. '
'Use -- to separate parameters and URLs, like this:\n%s' %
- args_to_str(correct_argv))
+ shell_quote(correct_argv))
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
@@ -1343,7 +1355,7 @@ class YoutubeDL:
value, fmt = escapeHTML(str(value)), str_fmt
elif fmt[-1] == 'q': # quoted
value = map(str, variadic(value) if '#' in flags else [value])
- value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
+ value, fmt = shell_quote(value, shell=True), str_fmt
elif fmt[-1] == 'B': # bytes
value = f'%{str_fmt}'.encode() % str(value).encode()
value, fmt = value.decode('utf-8', 'ignore'), 's'
@@ -2617,7 +2629,7 @@ class YoutubeDL:
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728)
with contextlib.suppress(ValueError, OverflowError, OSError):
- upload_date = datetime.datetime.fromtimestamp(info_dict[ts_key], datetime.timezone.utc)
+ upload_date = dt.datetime.fromtimestamp(info_dict[ts_key], dt.timezone.utc)
info_dict[date_key] = upload_date.strftime('%Y%m%d')
if not info_dict.get('release_year'):
@@ -2771,7 +2783,7 @@ class YoutubeDL:
get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
if not get_from_start:
- info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
+ info_dict['title'] += ' ' + dt.datetime.now().strftime('%Y-%m-%d %H:%M')
if info_dict.get('is_live') and formats:
formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
if get_from_start and not formats:
@@ -2802,6 +2814,9 @@ class YoutubeDL:
format['url'] = sanitize_url(format['url'])
if format.get('ext') is None:
format['ext'] = determine_ext(format['url']).lower()
+ if format['ext'] in ('aac', 'opus', 'mp3', 'flac', 'vorbis'):
+ if format.get('acodec') is None:
+ format['acodec'] = format['ext']
if format.get('protocol') is None:
format['protocol'] = determine_protocol(format)
if format.get('resolution') is None:
@@ -2812,9 +2827,8 @@ class YoutubeDL:
format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
# For fragmented formats, "tbr" is often max bitrate and not average
if (('manifest-filesize-approx' in self.params['compat_opts'] or not format.get('manifest_url'))
- and info_dict.get('duration') and format.get('tbr')
and not format.get('filesize') and not format.get('filesize_approx')):
- format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
+ format['filesize_approx'] = filesize_from_tbr(format.get('tbr'), info_dict.get('duration'))
format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict), load_cookies=True)
# Safeguard against old/insecure infojson when using --load-info-json
@@ -3864,8 +3878,8 @@ class YoutubeDL:
delim, (
format_field(f, 'filesize', ' \t%s', func=format_bytes)
or format_field(f, 'filesize_approx', '≈\t%s', func=format_bytes)
- or format_field(try_call(lambda: format_bytes(int(info_dict['duration'] * f['tbr'] * (1024 / 8)))),
- None, self._format_out('~\t%s', self.Styles.SUPPRESS))),
+ or format_field(filesize_from_tbr(f.get('tbr'), info_dict.get('duration')), None,
+ self._format_out('~\t%s', self.Styles.SUPPRESS), func=format_bytes)),
format_field(f, 'tbr', '\t%dk', func=round),
shorten_protocol_name(f.get('protocol', '')),
delim,
@@ -4077,6 +4091,22 @@ class YoutubeDL:
handler = self._request_director.handlers['Urllib']
return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)
+ def _get_available_impersonate_targets(self):
+ # todo(future): make available as public API
+ return [
+ (target, rh.RH_NAME)
+ for rh in self._request_director.handlers.values()
+ if isinstance(rh, ImpersonateRequestHandler)
+ for target in rh.supported_targets
+ ]
+
+ def _impersonate_target_available(self, target):
+ # todo(future): make available as public API
+ return any(
+ rh.is_supported_target(target)
+ for rh in self._request_director.handlers.values()
+ if isinstance(rh, ImpersonateRequestHandler))
+
def urlopen(self, req):
""" Start an HTTP download """
if isinstance(req, str):
@@ -4108,9 +4138,13 @@ class YoutubeDL:
raise RequestError(
'file:// URLs are disabled by default in yt-dlp for security reasons. '
'Use --enable-file-urls to enable at your own risk.', cause=ue) from ue
- if 'unsupported proxy type: "https"' in ue.msg.lower():
+ if (
+ 'unsupported proxy type: "https"' in ue.msg.lower()
+ and 'requests' not in self._request_director.handlers
+ and 'curl_cffi' not in self._request_director.handlers
+ ):
raise RequestError(
- 'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests')
+ 'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests, curl_cffi')
elif (
re.match(r'unsupported url scheme: "wss?"', ue.msg.lower())
@@ -4120,6 +4154,13 @@ class YoutubeDL:
'This request requires WebSocket support. '
'Ensure one of the following dependencies are installed: websockets',
cause=ue) from ue
+
+ elif re.match(r'unsupported (?:extensions: impersonate|impersonate target)', ue.msg.lower()):
+ raise RequestError(
+ f'Impersonate target "{req.extensions["impersonate"]}" is not available.'
+ f' See --list-impersonate-targets for available targets.'
+ f' This request requires browser impersonation, however you may be missing dependencies'
+ f' required to support this target.')
raise
except SSLError as e:
if 'UNSAFE_LEGACY_RENEGOTIATION_DISABLED' in str(e):
@@ -4152,6 +4193,7 @@ class YoutubeDL:
'timeout': 'socket_timeout',
'legacy_ssl_support': 'legacyserverconnect',
'enable_file_urls': 'enable_file_urls',
+ 'impersonate': 'impersonate',
'client_cert': {
'client_certificate': 'client_certificate',
'client_certificate_key': 'client_certificate_key',
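
The new impersonate parameter is checked against the installed request handlers when YoutubeDL is constructed; an unsupported target raises YoutubeDLError with a pointer to --list-impersonate-targets. A minimal embedding sketch (the 'chrome' target is only an example and needs curl_cffi to be available):

    from yt_dlp import YoutubeDL
    from yt_dlp.networking.impersonate import ImpersonateTarget

    # raises YoutubeDLError during __init__ if no installed request handler
    # supports the requested target (see the check added above)
    params = {'impersonate': ImpersonateTarget.from_str('chrome')}
    with YoutubeDL(params) as ydl:
        ydl.download(['https://example.com/some-video'])
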
diff --git a/yt_dlp/__init__.py b/yt_dlp/__init__.py
index aeea262..3d606bc 100644
--- a/yt_dlp/__init__.py
+++ b/yt_dlp/__init__.py
@@ -19,6 +19,7 @@ from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
from .downloader.external import get_external_downloader
from .extractor import list_extractor_classes
from .extractor.adobepass import MSO_INFO
+from .networking.impersonate import ImpersonateTarget
from .options import parseOpts
from .postprocessor import (
FFmpegExtractAudioPP,
@@ -48,6 +49,7 @@ from .utils import (
float_or_none,
format_field,
int_or_none,
+ join_nonempty,
match_filter_func,
parse_bytes,
parse_duration,
@@ -388,6 +390,9 @@ def validate_options(opts):
f'Supported keyrings are: {", ".join(sorted(SUPPORTED_KEYRINGS))}')
opts.cookiesfrombrowser = (browser_name, profile, keyring, container)
+ if opts.impersonate is not None:
+ opts.impersonate = ImpersonateTarget.from_str(opts.impersonate.lower())
+
# MetadataParser
def metadataparser_actions(f):
if isinstance(f, str):
@@ -831,6 +836,7 @@ def parse_options(argv=None):
'noprogress': opts.quiet if opts.noprogress is None else opts.noprogress,
'progress_with_newline': opts.progress_with_newline,
'progress_template': opts.progress_template,
+ 'progress_delta': opts.progress_delta,
'playliststart': opts.playliststart,
'playlistend': opts.playlistend,
'playlistreverse': opts.playlist_reverse,
@@ -911,6 +917,7 @@ def parse_options(argv=None):
'postprocessors': postprocessors,
'fixup': opts.fixup,
'source_address': opts.source_address,
+ 'impersonate': opts.impersonate,
'call_home': opts.call_home,
'sleep_interval_requests': opts.sleep_interval_requests,
'sleep_interval': opts.sleep_interval,
@@ -980,6 +987,41 @@ def _real_main(argv=None):
traceback.print_exc()
ydl._download_retcode = 100
+ if opts.list_impersonate_targets:
+
+ known_targets = [
+ # List of simplified targets we know are supported,
+ # to help users know what dependencies may be required.
+ (ImpersonateTarget('chrome'), 'curl_cffi'),
+ (ImpersonateTarget('edge'), 'curl_cffi'),
+ (ImpersonateTarget('safari'), 'curl_cffi'),
+ ]
+
+ available_targets = ydl._get_available_impersonate_targets()
+
+ def make_row(target, handler):
+ return [
+ join_nonempty(target.client.title(), target.version, delim='-') or '-',
+ join_nonempty((target.os or "").title(), target.os_version, delim='-') or '-',
+ handler,
+ ]
+
+ rows = [make_row(target, handler) for target, handler in available_targets]
+
+ for known_target, known_handler in known_targets:
+ if not any(
+ known_target in target and handler == known_handler
+ for target, handler in available_targets
+ ):
+ rows.append([
+ ydl._format_out(text, ydl.Styles.SUPPRESS)
+ for text in make_row(known_target, f'{known_handler} (not available)')
+ ])
+
+ ydl.to_screen('[info] Available impersonate targets')
+ ydl.to_stdout(render_table(['Client', 'OS', 'Source'], rows, extra_gap=2, delim='-'))
+ return
+
if not actual_use:
if pre_process:
return ydl._download_retcode
diff --git a/yt_dlp/__pyinstaller/hook-yt_dlp.py b/yt_dlp/__pyinstaller/hook-yt_dlp.py
index 7c3dbfb..8e7f42f 100644
--- a/yt_dlp/__pyinstaller/hook-yt_dlp.py
+++ b/yt_dlp/__pyinstaller/hook-yt_dlp.py
@@ -1,6 +1,6 @@
import sys
-from PyInstaller.utils.hooks import collect_submodules
+from PyInstaller.utils.hooks import collect_submodules, collect_data_files
def pycryptodome_module():
@@ -25,10 +25,12 @@ def get_hidden_imports():
for module in ('websockets', 'requests', 'urllib3'):
yield from collect_submodules(module)
# These are auto-detected, but explicitly add them just in case
- yield from ('mutagen', 'brotli', 'certifi', 'secretstorage')
+ yield from ('mutagen', 'brotli', 'certifi', 'secretstorage', 'curl_cffi')
hiddenimports = list(get_hidden_imports())
print(f'Adding imports: {hiddenimports}')
excludedimports = ['youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts', 'bundle']
+
+datas = collect_data_files('curl_cffi', includes=['cacert.pem'])
diff --git a/yt_dlp/compat/__init__.py b/yt_dlp/compat/__init__.py
index 5ad5c70..d820ada 100644
--- a/yt_dlp/compat/__init__.py
+++ b/yt_dlp/compat/__init__.py
@@ -27,12 +27,9 @@ def compat_etree_fromstring(text):
compat_os_name = os._name if os.name == 'java' else os.name
-if compat_os_name == 'nt':
- def compat_shlex_quote(s):
- import re
- return s if re.match(r'^[-_\w./]+$', s) else s.replace('"', '""').join('""')
-else:
- from shlex import quote as compat_shlex_quote # noqa: F401
+def compat_shlex_quote(s):
+ from ..utils import shell_quote
+ return shell_quote(s)
def compat_ord(c):
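
compat_shlex_quote is reduced to a thin wrapper over shell_quote, which handles POSIX quoting as well as Windows cmd.exe quoting (the doubled-quote escaping covered by the new run_shell assertions in test_utils.py). A small hedged usage sketch:

    from yt_dlp.utils import shell_quote

    # quote a full argv for display; on Windows, shell=True applies cmd.exe-style
    # escaping (quotes doubled, argument wrapped in '"'), matching the new
    # run_shell assertions in test_utils.py
    print(shell_quote(['echo', 'test"&'], shell=True))

    # compat_shlex_quote now defers to the same helper for a single argument
    print(shell_quote('some file.mp4'))
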
diff --git a/yt_dlp/cookies.py b/yt_dlp/cookies.py
index 28d174a..7b8d215 100644
--- a/yt_dlp/cookies.py
+++ b/yt_dlp/cookies.py
@@ -1,6 +1,7 @@
import base64
import collections
import contextlib
+import datetime as dt
import glob
import http.cookiejar
import http.cookies
@@ -15,7 +16,6 @@ import sys
import tempfile
import time
import urllib.request
-from datetime import datetime, timedelta, timezone
from enum import Enum, auto
from hashlib import pbkdf2_hmac
@@ -194,7 +194,11 @@ def _firefox_browser_dirs():
yield os.path.expanduser('~/Library/Application Support/Firefox/Profiles')
else:
- yield from map(os.path.expanduser, ('~/.mozilla/firefox', '~/snap/firefox/common/.mozilla/firefox'))
+ yield from map(os.path.expanduser, (
+ '~/.mozilla/firefox',
+ '~/snap/firefox/common/.mozilla/firefox',
+ '~/.var/app/org.mozilla.firefox/.mozilla/firefox',
+ ))
def _firefox_cookie_dbs(roots):
@@ -594,7 +598,7 @@ class DataParser:
def _mac_absolute_time_to_posix(timestamp):
- return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp())
+ return int((dt.datetime(2001, 1, 1, 0, 0, tzinfo=dt.timezone.utc) + dt.timedelta(seconds=timestamp)).timestamp())
def _parse_safari_cookies_header(data, logger):
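
cookies.py now imports datetime as dt; the Safari timestamp conversion itself is unchanged. For reference, Mac "absolute time" counts seconds from 2001-01-01 UTC, so the conversion is a plain epoch shift:

    import datetime as dt

    # Mac "absolute time" counts seconds from 2001-01-01T00:00:00Z, so converting
    # to a POSIX timestamp is a fixed epoch shift (as _mac_absolute_time_to_posix does)
    mac_epoch = dt.datetime(2001, 1, 1, tzinfo=dt.timezone.utc)
    assert int((mac_epoch + dt.timedelta(seconds=0)).timestamp()) == 978307200
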
diff --git a/yt_dlp/dependencies/__init__.py b/yt_dlp/dependencies/__init__.py
index 9e3f907..0d58da2 100644
--- a/yt_dlp/dependencies/__init__.py
+++ b/yt_dlp/dependencies/__init__.py
@@ -74,6 +74,10 @@ else:
if hasattr(xattr, 'set'): # pyxattr
xattr._yt_dlp__identifier = 'pyxattr'
+try:
+ import curl_cffi
+except ImportError:
+ curl_cffi = None
from . import Cryptodome
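
curl_cffi joins the optional dependencies: the module object is exposed when installed and None otherwise, so callers can feature-detect it. For example:

    from yt_dlp import dependencies

    # the attribute is the imported module when curl_cffi is installed, else None
    if dependencies.curl_cffi is None:
        print('curl_cffi not installed: impersonation support will be unavailable')
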
diff --git a/yt_dlp/downloader/common.py b/yt_dlp/downloader/common.py
index b71d7ee..65a0d6f 100644
--- a/yt_dlp/downloader/common.py
+++ b/yt_dlp/downloader/common.py
@@ -4,6 +4,7 @@ import functools
import os
import random
import re
+import threading
import time
from ..minicurses import (
@@ -63,6 +64,7 @@ class FileDownloader:
min_filesize: Skip files smaller than this size
max_filesize: Skip files larger than this size
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
+ progress_delta: The minimum time between progress output, in seconds
external_downloader_args: A dictionary of downloader keys (in lower case)
and a list of additional command-line arguments for the
executable. Use 'default' as the name for arguments to be
@@ -88,6 +90,9 @@ class FileDownloader:
self.params = params
self._prepare_multiline_status()
self.add_progress_hook(self.report_progress)
+ if self.params.get('progress_delta'):
+ self._progress_delta_lock = threading.Lock()
+ self._progress_delta_time = time.monotonic()
def _set_ydl(self, ydl):
self.ydl = ydl
@@ -366,6 +371,12 @@ class FileDownloader:
if s['status'] != 'downloading':
return
+ if update_delta := self.params.get('progress_delta'):
+ with self._progress_delta_lock:
+ if time.monotonic() < self._progress_delta_time:
+ return
+ self._progress_delta_time += update_delta
+
s.update({
'_eta_str': self.format_eta(s.get('eta')).strip(),
'_speed_str': self.format_speed(s.get('speed')),
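
progress_delta throttles progress output by tracking the next allowed output time behind a lock and skipping updates that arrive too early. A standalone sketch of the same pattern (not the FileDownloader code itself):

    import threading
    import time

    class ProgressThrottle:
        """Drop updates that arrive before the next scheduled output time."""

        def __init__(self, delta):
            self._delta = delta
            self._lock = threading.Lock()
            self._next_time = time.monotonic()

        def should_report(self):
            with self._lock:
                if time.monotonic() < self._next_time:
                    return False
                self._next_time += self._delta  # schedule the next allowed update
                return True
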
diff --git a/yt_dlp/downloader/external.py b/yt_dlp/downloader/external.py
index ce5eeb0..8b0b94e 100644
--- a/yt_dlp/downloader/external.py
+++ b/yt_dlp/downloader/external.py
@@ -491,7 +491,7 @@ class FFmpegFD(ExternalFD):
if not self.params.get('verbose'):
args += ['-hide_banner']
- args += traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args'), default=[])
+ args += traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args', ...))
# These exists only for compatibility. Extractors should use
# info_dict['downloader_options']['ffmpeg_args'] instead
@@ -615,6 +615,8 @@ class FFmpegFD(ExternalFD):
else:
args += ['-f', EXT_TO_OUT_FORMATS.get(ext, ext)]
+ args += traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args_out', ...))
+
args += self._configuration_args(('_o1', '_o', ''))
args = [encodeArgument(opt) for opt in args]
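
With the traverse_obj lookups above, extractors can feed extra ffmpeg arguments through downloader_options: ffmpeg_args lands near the front of the command line and the new ffmpeg_args_out just before the output options. An illustrative info_dict fragment (values are examples only, not from any real extractor):

    # illustrative values only; the keys are the ones read by FFmpegFD above
    info_dict = {
        'id': 'example',
        'url': 'https://example.com/stream.m3u8',
        'downloader_options': {
            'ffmpeg_args': ['-seekable', '0'],                    # input-side arguments
            'ffmpeg_args_out': ['-metadata', 'title=Example'],    # output-side arguments
        },
    }
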
diff --git a/yt_dlp/extractor/_extractors.py b/yt_dlp/extractor/_extractors.py
index c753655..4203427 100644
--- a/yt_dlp/extractor/_extractors.py
+++ b/yt_dlp/extractor/_extractors.py
@@ -150,6 +150,7 @@ from .arte import (
)
from .arnes import ArnesIE
from .asobichannel import AsobiChannelIE, AsobiChannelTagURLIE
+from .asobistage import AsobiStageIE
from .atresplayer import AtresPlayerIE
from .atscaleconf import AtScaleConfEventIE
from .atvat import ATVAtIE
@@ -590,6 +591,7 @@ from .facebook import (
FacebookReelIE,
FacebookAdsIE,
)
+from .fathom import FathomIE
from .fancode import (
FancodeVodIE,
FancodeLiveIE
@@ -874,6 +876,7 @@ from .jeuxvideo import JeuxVideoIE
from .jiosaavn import (
JioSaavnSongIE,
JioSaavnAlbumIE,
+ JioSaavnPlaylistIE,
)
from .jove import JoveIE
from .joj import JojIE
@@ -989,6 +992,10 @@ from .lnkgo import (
LnkGoIE,
LnkIE,
)
+from .loom import (
+ LoomIE,
+ LoomFolderIE,
+)
from .lovehomeporn import LoveHomePornIE
from .lrt import (
LRTVODIE,
@@ -1750,6 +1757,7 @@ from .shahid import (
ShahidIE,
ShahidShowIE,
)
+from .sharepoint import SharePointIE
from .sharevideos import ShareVideosEmbedIE
from .sibnet import SibnetEmbedIE
from .shemaroome import ShemarooMeIE
@@ -2283,6 +2291,7 @@ from .vrt import (
VrtNUIE,
KetnetIE,
DagelijkseKostIE,
+ Radio1BeIE,
)
from .vtm import VTMIE
from .medialaan import MedialaanIE
diff --git a/yt_dlp/extractor/afreecatv.py b/yt_dlp/extractor/afreecatv.py
index 3d26d9c..2c33c90 100644
--- a/yt_dlp/extractor/afreecatv.py
+++ b/yt_dlp/extractor/afreecatv.py
@@ -1,25 +1,63 @@
import functools
-import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
OnDemandPagedList,
- date_from_str,
+ UserNotLive,
determine_ext,
+ filter_dict,
int_or_none,
- qualities,
- traverse_obj,
- unified_strdate,
unified_timestamp,
- update_url_query,
url_or_none,
urlencode_postdata,
- xpath_text,
)
+from ..utils.traversal import traverse_obj
-class AfreecaTVIE(InfoExtractor):
+class AfreecaTVBaseIE(InfoExtractor):
+ _NETRC_MACHINE = 'afreecatv'
+
+ def _perform_login(self, username, password):
+ login_form = {
+ 'szWork': 'login',
+ 'szType': 'json',
+ 'szUid': username,
+ 'szPassword': password,
+ 'isSaveId': 'false',
+ 'szScriptVar': 'oLoginRet',
+ 'szAction': '',
+ }
+
+ response = self._download_json(
+ 'https://login.afreecatv.com/app/LoginAction.php', None,
+ 'Logging in', data=urlencode_postdata(login_form))
+
+ _ERRORS = {
+ -4: 'Your account has been suspended due to a violation of our terms and policies.',
+ -5: 'https://member.afreecatv.com/app/user_delete_progress.php',
+ -6: 'https://login.afreecatv.com/membership/changeMember.php',
+ -8: "Hello! AfreecaTV here.\nThe username you have entered belongs to \n an account that requires a legal guardian's consent. \nIf you wish to use our services without restriction, \nplease make sure to go through the necessary verification process.",
+ -9: 'https://member.afreecatv.com/app/pop_login_block.php',
+ -11: 'https://login.afreecatv.com/afreeca/second_login.php',
+ -12: 'https://member.afreecatv.com/app/user_security.php',
+ 0: 'The username does not exist or you have entered the wrong password.',
+ -1: 'The username does not exist or you have entered the wrong password.',
+ -3: 'You have entered your username/password incorrectly.',
+ -7: 'You cannot use your Global AfreecaTV account to access Korean AfreecaTV.',
+ -10: 'Sorry for the inconvenience. \nYour account has been blocked due to an unauthorized access. \nPlease contact our Help Center for assistance.',
+ -32008: 'You have failed to log in. Please contact our Help Center.',
+ }
+
+ result = int_or_none(response.get('RESULT'))
+ if result != 1:
+ error = _ERRORS.get(result, 'You have failed to log in.')
+ raise ExtractorError(
+ 'Unable to login: %s said: %s' % (self.IE_NAME, error),
+ expected=True)
+
+
+class AfreecaTVIE(AfreecaTVBaseIE):
IE_NAME = 'afreecatv'
IE_DESC = 'afreecatv.com'
_VALID_URL = r'''(?x)
@@ -34,7 +72,6 @@ class AfreecaTVIE(InfoExtractor):
)
(?P<id>\d+)
'''
- _NETRC_MACHINE = 'afreecatv'
_TESTS = [{
'url': 'http://live.afreecatv.com:8079/app/index.cgi?szType=read_ucc_bbs&szBjId=dailyapril&nStationNo=16711924&nBbsNo=18605867&nTitleNo=36164052&szSkin=',
'md5': 'f72c89fe7ecc14c1b5ce506c4996046e',
@@ -87,6 +124,7 @@ class AfreecaTVIE(InfoExtractor):
'uploader': '♥이슬이',
'uploader_id': 'dasl8121',
'upload_date': '20170411',
+ 'timestamp': 1491929865,
'duration': 213,
},
'params': {
@@ -120,219 +158,102 @@ class AfreecaTVIE(InfoExtractor):
'uploader_id': 'rlantnghks',
'uploader': '페이즈으',
'duration': 10840,
- 'thumbnail': 'http://videoimg.afreecatv.com/php/SnapshotLoad.php?rowKey=20230108_9FF5BEE1_244432674_1_r',
+ 'thumbnail': r're:https?://videoimg\.afreecatv\.com/.+',
'upload_date': '20230108',
+ 'timestamp': 1673218805,
'title': '젠지 페이즈',
},
'params': {
'skip_download': True,
},
+ }, {
+ # adult content
+ 'url': 'https://vod.afreecatv.com/player/70395877',
+ 'only_matching': True,
+ }, {
+ # subscribers only
+ 'url': 'https://vod.afreecatv.com/player/104647403',
+ 'only_matching': True,
+ }, {
+ # private
+ 'url': 'https://vod.afreecatv.com/player/81669846',
+ 'only_matching': True,
}]
- @staticmethod
- def parse_video_key(key):
- video_key = {}
- m = re.match(r'^(?P<upload_date>\d{8})_\w+_(?P<part>\d+)$', key)
- if m:
- video_key['upload_date'] = m.group('upload_date')
- video_key['part'] = int(m.group('part'))
- return video_key
-
- def _perform_login(self, username, password):
- login_form = {
- 'szWork': 'login',
- 'szType': 'json',
- 'szUid': username,
- 'szPassword': password,
- 'isSaveId': 'false',
- 'szScriptVar': 'oLoginRet',
- 'szAction': '',
- }
-
- response = self._download_json(
- 'https://login.afreecatv.com/app/LoginAction.php', None,
- 'Logging in', data=urlencode_postdata(login_form))
-
- _ERRORS = {
- -4: 'Your account has been suspended due to a violation of our terms and policies.',
- -5: 'https://member.afreecatv.com/app/user_delete_progress.php',
- -6: 'https://login.afreecatv.com/membership/changeMember.php',
- -8: "Hello! AfreecaTV here.\nThe username you have entered belongs to \n an account that requires a legal guardian's consent. \nIf you wish to use our services without restriction, \nplease make sure to go through the necessary verification process.",
- -9: 'https://member.afreecatv.com/app/pop_login_block.php',
- -11: 'https://login.afreecatv.com/afreeca/second_login.php',
- -12: 'https://member.afreecatv.com/app/user_security.php',
- 0: 'The username does not exist or you have entered the wrong password.',
- -1: 'The username does not exist or you have entered the wrong password.',
- -3: 'You have entered your username/password incorrectly.',
- -7: 'You cannot use your Global AfreecaTV account to access Korean AfreecaTV.',
- -10: 'Sorry for the inconvenience. \nYour account has been blocked due to an unauthorized access. \nPlease contact our Help Center for assistance.',
- -32008: 'You have failed to log in. Please contact our Help Center.',
- }
-
- result = int_or_none(response.get('RESULT'))
- if result != 1:
- error = _ERRORS.get(result, 'You have failed to log in.')
- raise ExtractorError(
- 'Unable to login: %s said: %s' % (self.IE_NAME, error),
- expected=True)
-
def _real_extract(self, url):
video_id = self._match_id(url)
-
- partial_view = False
- adult_view = False
- for _ in range(2):
- data = self._download_json(
- 'https://api.m.afreecatv.com/station/video/a/view',
- video_id, headers={'Referer': url}, data=urlencode_postdata({
- 'nTitleNo': video_id,
- 'nApiLevel': 10,
- }))['data']
- if traverse_obj(data, ('code', {int})) == -6221:
- raise ExtractorError('The VOD does not exist', expected=True)
- query = {
+ data = self._download_json(
+ 'https://api.m.afreecatv.com/station/video/a/view', video_id,
+ headers={'Referer': url}, data=urlencode_postdata({
'nTitleNo': video_id,
- 'nStationNo': data['station_no'],
- 'nBbsNo': data['bbs_no'],
- }
- if partial_view:
- query['partialView'] = 'SKIP_ADULT'
- if adult_view:
- query['adultView'] = 'ADULT_VIEW'
- video_xml = self._download_xml(
- 'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php',
- video_id, 'Downloading video info XML%s'
- % (' (skipping adult)' if partial_view else ''),
- video_id, headers={
- 'Referer': url,
- }, query=query)
-
- flag = xpath_text(video_xml, './track/flag', 'flag', default=None)
- if flag and flag == 'SUCCEED':
- break
- if flag == 'PARTIAL_ADULT':
- self.report_warning(
- 'In accordance with local laws and regulations, underage users are restricted from watching adult content. '
- 'Only content suitable for all ages will be downloaded. '
- 'Provide account credentials if you wish to download restricted content.')
- partial_view = True
- continue
- elif flag == 'ADULT':
- if not adult_view:
- adult_view = True
- continue
- error = 'Only users older than 19 are able to watch this video. Provide account credentials to download this content.'
- else:
- error = flag
- raise ExtractorError(
- '%s said: %s' % (self.IE_NAME, error), expected=True)
- else:
- raise ExtractorError('Unable to download video info')
-
- video_element = video_xml.findall('./track/video')[-1]
- if video_element is None or video_element.text is None:
- raise ExtractorError(
- 'Video %s does not exist' % video_id, expected=True)
-
- video_url = video_element.text.strip()
-
- title = xpath_text(video_xml, './track/title', 'title', fatal=True)
-
- uploader = xpath_text(video_xml, './track/nickname', 'uploader')
- uploader_id = xpath_text(video_xml, './track/bj_id', 'uploader id')
- duration = int_or_none(xpath_text(
- video_xml, './track/duration', 'duration'))
- thumbnail = xpath_text(video_xml, './track/titleImage', 'thumbnail')
-
- common_entry = {
- 'uploader': uploader,
- 'uploader_id': uploader_id,
- 'thumbnail': thumbnail,
- }
-
- info = common_entry.copy()
- info.update({
- 'id': video_id,
- 'title': title,
- 'duration': duration,
+ 'nApiLevel': 10,
+ }))['data']
+
+ error_code = traverse_obj(data, ('code', {int}))
+ if error_code == -6221:
+ raise ExtractorError('The VOD does not exist', expected=True)
+ elif error_code == -6205:
+ raise ExtractorError('This VOD is private', expected=True)
+
+ common_info = traverse_obj(data, {
+ 'title': ('title', {str}),
+ 'uploader': ('writer_nick', {str}),
+ 'uploader_id': ('bj_id', {str}),
+ 'duration': ('total_file_duration', {functools.partial(int_or_none, scale=1000)}),
+ 'thumbnail': ('thumb', {url_or_none}),
})
- if not video_url:
- entries = []
- file_elements = video_element.findall('./file')
- one = len(file_elements) == 1
- for file_num, file_element in enumerate(file_elements, start=1):
- file_url = url_or_none(file_element.text)
- if not file_url:
- continue
- key = file_element.get('key', '')
- upload_date = unified_strdate(self._search_regex(
- r'^(\d{8})_', key, 'upload date', default=None))
- if upload_date is not None:
- # sometimes the upload date isn't included in the file name
- # instead, another random ID is, which may parse as a valid
- # date but be wildly out of a reasonable range
- parsed_date = date_from_str(upload_date)
- if parsed_date.year < 2000 or parsed_date.year >= 2100:
- upload_date = None
- file_duration = int_or_none(file_element.get('duration'))
- format_id = key if key else '%s_%s' % (video_id, file_num)
- if determine_ext(file_url) == 'm3u8':
- formats = self._extract_m3u8_formats(
- file_url, video_id, 'mp4', entry_protocol='m3u8_native',
- m3u8_id='hls',
- note='Downloading part %d m3u8 information' % file_num)
- else:
- formats = [{
- 'url': file_url,
- 'format_id': 'http',
- }]
- if not formats and not self.get_param('ignore_no_formats'):
- continue
- file_info = common_entry.copy()
- file_info.update({
- 'id': format_id,
- 'title': title if one else '%s (part %d)' % (title, file_num),
- 'upload_date': upload_date,
- 'duration': file_duration,
- 'formats': formats,
+ entries = []
+ for file_num, file_element in enumerate(
+ traverse_obj(data, ('files', lambda _, v: url_or_none(v['file']))), start=1):
+ file_url = file_element['file']
+ if determine_ext(file_url) == 'm3u8':
+ formats = self._extract_m3u8_formats(
+ file_url, video_id, 'mp4', m3u8_id='hls',
+ note=f'Downloading part {file_num} m3u8 information')
+ else:
+ formats = [{
+ 'url': file_url,
+ 'format_id': 'http',
+ }]
+
+ entries.append({
+ **common_info,
+ 'id': file_element.get('file_info_key') or f'{video_id}_{file_num}',
+ 'title': f'{common_info.get("title") or "Untitled"} (part {file_num})',
+ 'formats': formats,
+ **traverse_obj(file_element, {
+ 'duration': ('duration', {functools.partial(int_or_none, scale=1000)}),
+ 'timestamp': ('file_start', {unified_timestamp}),
})
- entries.append(file_info)
- entries_info = info.copy()
- entries_info.update({
- '_type': 'multi_video',
- 'entries': entries,
})
- return entries_info
-
- info = {
- 'id': video_id,
- 'title': title,
- 'uploader': uploader,
- 'uploader_id': uploader_id,
- 'duration': duration,
- 'thumbnail': thumbnail,
- }
- if determine_ext(video_url) == 'm3u8':
- info['formats'] = self._extract_m3u8_formats(
- video_url, video_id, 'mp4', entry_protocol='m3u8_native',
- m3u8_id='hls')
- else:
- app, playpath = video_url.split('mp4:')
- info.update({
- 'url': app,
- 'ext': 'flv',
- 'play_path': 'mp4:' + playpath,
- 'rtmp_live': True, # downloading won't end without this
- })
+ if traverse_obj(data, ('adult_status', {str})) == 'notLogin':
+ if not entries:
+ self.raise_login_required(
+ 'Only users older than 19 are able to watch this video', method='password')
+ self.report_warning(
+ 'In accordance with local laws and regulations, underage users are '
+ 'restricted from watching adult content. Only content suitable for all '
+ f'ages will be downloaded. {self._login_hint("password")}')
+
+ if not entries and traverse_obj(data, ('sub_upload_type', {str})):
+ self.raise_login_required('This VOD is for subscribers only', method='password')
+
+ if len(entries) == 1:
+ return {
+ **entries[0],
+ 'title': common_info.get('title'),
+ }
- return info
+ common_info['timestamp'] = traverse_obj(entries, (..., 'timestamp'), get_all=False)
+ return self.playlist_result(entries, video_id, multi_video=True, **common_info)
-class AfreecaTVLiveIE(AfreecaTVIE): # XXX: Do not subclass from concrete IE
+class AfreecaTVLiveIE(AfreecaTVBaseIE):
IE_NAME = 'afreecatv:live'
+ IE_DESC = 'afreecatv.com livestreams'
_VALID_URL = r'https?://play\.afreeca(?:tv)?\.com/(?P<id>[^/]+)(?:/(?P<bno>\d+))?'
_TESTS = [{
'url': 'https://play.afreecatv.com/pyh3646/237852185',
@@ -347,77 +268,57 @@ class AfreecaTVLiveIE(AfreecaTVIE): # XXX: Do not subclass from concrete IE
},
'skip': 'Livestream has ended',
}, {
- 'url': 'http://play.afreeca.com/pyh3646/237852185',
+ 'url': 'https://play.afreecatv.com/pyh3646/237852185',
'only_matching': True,
}, {
- 'url': 'http://play.afreeca.com/pyh3646',
+ 'url': 'https://play.afreecatv.com/pyh3646',
'only_matching': True,
}]
_LIVE_API_URL = 'https://live.afreecatv.com/afreeca/player_live_api.php'
- _QUALITIES = ('sd', 'hd', 'hd2k', 'original')
-
def _real_extract(self, url):
broadcaster_id, broadcast_no = self._match_valid_url(url).group('id', 'bno')
- password = self.get_param('videopassword')
+ channel_info = traverse_obj(self._download_json(
+ self._LIVE_API_URL, broadcaster_id, data=urlencode_postdata({'bid': broadcaster_id})),
+ ('CHANNEL', {dict})) or {}
- info = self._download_json(self._LIVE_API_URL, broadcaster_id, fatal=False,
- data=urlencode_postdata({'bid': broadcaster_id})) or {}
- channel_info = info.get('CHANNEL') or {}
broadcaster_id = channel_info.get('BJID') or broadcaster_id
broadcast_no = channel_info.get('BNO') or broadcast_no
- password_protected = channel_info.get('BPWD')
if not broadcast_no:
- raise ExtractorError(f'Unable to extract broadcast number ({broadcaster_id} may not be live)', expected=True)
- if password_protected == 'Y' and password is None:
+ raise UserNotLive(video_id=broadcaster_id)
+
+ password = self.get_param('videopassword')
+ if channel_info.get('BPWD') == 'Y' and password is None:
raise ExtractorError(
'This livestream is protected by a password, use the --video-password option',
expected=True)
- formats = []
- quality_key = qualities(self._QUALITIES)
- for quality_str in self._QUALITIES:
- params = {
+ aid = self._download_json(
+ self._LIVE_API_URL, broadcast_no, 'Downloading access token for stream',
+ 'Unable to download access token for stream', data=urlencode_postdata(filter_dict({
'bno': broadcast_no,
'stream_type': 'common',
'type': 'aid',
- 'quality': quality_str,
- }
- if password is not None:
- params['pwd'] = password
- aid_response = self._download_json(
- self._LIVE_API_URL, broadcast_no, fatal=False,
- data=urlencode_postdata(params),
- note=f'Downloading access token for {quality_str} stream',
- errnote=f'Unable to download access token for {quality_str} stream')
- aid = traverse_obj(aid_response, ('CHANNEL', 'AID'))
- if not aid:
- continue
-
- stream_base_url = channel_info.get('RMD') or 'https://livestream-manager.afreecatv.com'
- stream_info = self._download_json(
- f'{stream_base_url}/broad_stream_assign.html', broadcast_no, fatal=False,
- query={
- 'return_type': channel_info.get('CDN', 'gcp_cdn'),
- 'broad_key': f'{broadcast_no}-common-{quality_str}-hls',
- },
- note=f'Downloading metadata for {quality_str} stream',
- errnote=f'Unable to download metadata for {quality_str} stream') or {}
-
- if stream_info.get('view_url'):
- formats.append({
- 'format_id': quality_str,
- 'url': update_url_query(stream_info['view_url'], {'aid': aid}),
- 'ext': 'mp4',
- 'protocol': 'm3u8',
- 'quality': quality_key(quality_str),
- })
-
- station_info = self._download_json(
+ 'quality': 'master',
+ 'pwd': password,
+ })))['CHANNEL']['AID']
+
+ stream_base_url = channel_info.get('RMD') or 'https://livestream-manager.afreecatv.com'
+ stream_info = self._download_json(f'{stream_base_url}/broad_stream_assign.html', broadcast_no, query={
+ # works: gs_cdn_pc_app, gs_cdn_mobile_web, gs_cdn_pc_web
+ 'return_type': 'gs_cdn_pc_app',
+ 'broad_key': f'{broadcast_no}-common-master-hls',
+ }, note='Downloading metadata for stream', errnote='Unable to download metadata for stream')
+
+ formats = self._extract_m3u8_formats(
+ stream_info['view_url'], broadcast_no, 'mp4', m3u8_id='hls',
+ query={'aid': aid}, headers={'Referer': url})
+
+ station_info = traverse_obj(self._download_json(
'https://st.afreecatv.com/api/get_station_status.php', broadcast_no,
- query={'szBjId': broadcaster_id}, fatal=False,
- note='Downloading channel metadata', errnote='Unable to download channel metadata') or {}
+ 'Downloading channel metadata', 'Unable to download channel metadata',
+ query={'szBjId': broadcaster_id}, fatal=False), {dict}) or {}
return {
'id': broadcast_no,
@@ -427,6 +328,7 @@ class AfreecaTVLiveIE(AfreecaTVIE): # XXX: Do not subclass from concrete IE
'timestamp': unified_timestamp(station_info.get('broad_start')),
'formats': formats,
'is_live': True,
+ 'http_headers': {'Referer': url},
}
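
The rewritten AfreecaTV extractor maps API fields through traverse_obj with small transforms; millisecond durations, for instance, go through functools.partial(int_or_none, scale=1000). A quick illustration of that helper:

    import functools

    from yt_dlp.utils import int_or_none

    to_seconds = functools.partial(int_or_none, scale=1000)
    assert to_seconds('213000') == 213   # the API reports milliseconds; scale divides
    assert to_seconds(None) is None      # missing fields stay None instead of raising
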
diff --git a/yt_dlp/extractor/ard.py b/yt_dlp/extractor/ard.py
index 46e68d6..3db59c5 100644
--- a/yt_dlp/extractor/ard.py
+++ b/yt_dlp/extractor/ard.py
@@ -1,5 +1,5 @@
+import functools
import re
-from functools import partial
from .common import InfoExtractor
from ..utils import (
@@ -349,7 +349,7 @@ class ARDBetaMediathekIE(InfoExtractor):
r'(?P<title>.*)',
]
- return traverse_obj(patterns, (..., {partial(re.match, string=title)}, {
+ return traverse_obj(patterns, (..., {functools.partial(re.match, string=title)}, {
'season_number': ('season_number', {int_or_none}),
'episode_number': ('episode_number', {int_or_none}),
'episode': ((
diff --git a/yt_dlp/extractor/asobistage.py b/yt_dlp/extractor/asobistage.py
new file mode 100644
index 0000000..b088a1b
--- /dev/null
+++ b/yt_dlp/extractor/asobistage.py
@@ -0,0 +1,154 @@
+import functools
+
+from .common import InfoExtractor
+from ..utils import str_or_none, url_or_none
+from ..utils.traversal import traverse_obj
+
+
+class AsobiStageIE(InfoExtractor):
+ IE_DESC = 'ASOBISTAGE (アソビステージ)'
+ _VALID_URL = r'https?://asobistage\.asobistore\.jp/event/(?P<id>(?P<event>\w+)/(?P<type>archive|player)/(?P<slug>\w+))(?:[?#]|$)'
+ _TESTS = [{
+ 'url': 'https://asobistage.asobistore.jp/event/315passionhour_2022summer/archive/frame',
+ 'info_dict': {
+ 'id': '315passionhour_2022summer/archive/frame',
+ 'title': '315プロダクションプレゼンツ 315パッションアワー!!!',
+ 'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
+ },
+ 'playlist_count': 1,
+ 'playlist': [{
+ 'info_dict': {
+ 'id': 'edff52f2',
+ 'ext': 'mp4',
+ 'title': '315passion_FRAME_only',
+ 'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
+ },
+ }],
+ }, {
+ 'url': 'https://asobistage.asobistore.jp/event/idolmaster_idolworld2023_goods/archive/live',
+ 'info_dict': {
+ 'id': 'idolmaster_idolworld2023_goods/archive/live',
+ 'title': 'md5:378510b6e830129d505885908bd6c576',
+ 'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
+ },
+ 'playlist_count': 1,
+ 'playlist': [{
+ 'info_dict': {
+ 'id': '3aef7110',
+ 'ext': 'mp4',
+ 'title': 'asobistore_station_1020_serverREC',
+ 'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
+ },
+ }],
+ }, {
+ 'url': 'https://asobistage.asobistore.jp/event/sidem_fclive_bpct/archive/premium_hc',
+ 'playlist_count': 4,
+ 'info_dict': {
+ 'id': 'sidem_fclive_bpct/archive/premium_hc',
+ 'title': '315 Production presents F@NTASTIC COMBINATION LIVE ~BRAINPOWER!!~/~CONNECTIME!!!!~',
+ 'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
+ },
+ }, {
+ 'url': 'https://asobistage.asobistore.jp/event/ijigenfes_utagassen/player/day1',
+ 'only_matching': True,
+ }]
+
+ _API_HOST = 'https://asobistage-api.asobistore.jp'
+ _HEADERS = {}
+ _is_logged_in = False
+
+ @functools.cached_property
+ def _owned_tickets(self):
+ owned_tickets = set()
+ if not self._is_logged_in:
+ return owned_tickets
+
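+ # collect the digital product IDs owned via both ticket purchases and serial-code redemptions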
+ for path, name in [
+ ('api/v1/purchase_history/list', 'ticket purchase history'),
+ ('api/v1/serialcode/list', 'redemption history'),
+ ]:
+ response = self._download_json(
+ f'{self._API_HOST}/{path}', None, f'Downloading {name}',
+ f'Unable to download {name}', expected_status=400)
+ if traverse_obj(response, ('payload', 'error_message'), 'error') == 'notlogin':
+ self._is_logged_in = False
+ break
+ owned_tickets.update(
+ traverse_obj(response, ('payload', 'value', ..., 'digital_product_id', {str_or_none})))
+
+ return owned_tickets
+
+ def _get_available_channel_id(self, channel):
+ channel_id = traverse_obj(channel, ('chennel_vspf_id', {str}))
+ if not channel_id:
+ return None
+ # if rights_type_id == 6, then 'No conditions (no login required - non-members are OK)'
+ if traverse_obj(channel, ('viewrights', lambda _, v: v['rights_type_id'] == 6)):
+ return channel_id
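+ # otherwise viewing requires owning one of the tickets or serial codes listed in the channel's viewrights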
+ available_tickets = traverse_obj(channel, (
+ 'viewrights', ..., ('tickets', 'serialcodes'), ..., 'digital_product_id', {str_or_none}))
+ if not self._owned_tickets.intersection(available_tickets):
+ self.report_warning(
+ f'You are not a ticketholder for "{channel.get("channel_name") or channel_id}"')
+ return None
+ return channel_id
+
+ def _real_initialize(self):
+ if self._get_cookies(self._API_HOST):
+ self._is_logged_in = True
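+ # the vspf token is fetched even for anonymous sessions and sent as a Bearer header on channel info requests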
+ token = self._download_json(
+ f'{self._API_HOST}/api/v1/vspf/token', None, 'Getting token', 'Unable to get token')
+ self._HEADERS['Authorization'] = f'Bearer {token}'
+
+ def _real_extract(self, url):
+ video_id, event, type_, slug = self._match_valid_url(url).group('id', 'event', 'type', 'slug')
+ video_type = {'archive': 'archives', 'player': 'broadcasts'}[type_]
+ webpage = self._download_webpage(url, video_id)
+ event_data = traverse_obj(
+ self._search_nextjs_data(webpage, video_id, default='{}'),
+ ('props', 'pageProps', 'eventCMSData', {
+ 'title': ('event_name', {str}),
+ 'thumbnail': ('event_thumbnail_image', {url_or_none}),
+ }))
+
+ available_channels = traverse_obj(self._download_json(
+ f'https://asobistage.asobistore.jp/cdn/v101/events/{event}/{video_type}.json',
+ video_id, 'Getting channel list', 'Unable to get channel list'), (
+ video_type, lambda _, v: v['broadcast_slug'] == slug,
+ 'channels', lambda _, v: v['chennel_vspf_id'] != '00000'))
+
+ entries = []
+ for channel_id in traverse_obj(available_channels, (..., {self._get_available_channel_id})):
+ if video_type == 'archives':
+ channel_json = self._download_json(
+ f'https://survapi.channel.or.jp/proxy/v1/contents/{channel_id}/get_by_cuid', channel_id,
+ 'Getting archive channel info', 'Unable to get archive channel info', fatal=False,
+ headers=self._HEADERS)
+ channel_data = traverse_obj(channel_json, ('ex_content', {
+ 'm3u8_url': 'streaming_url',
+ 'title': 'title',
+ 'thumbnail': ('thumbnail', 'url'),
+ }))
+ else: # video_type == 'broadcasts'
+ channel_json = self._download_json(
+ f'https://survapi.channel.or.jp/ex/events/{channel_id}', channel_id,
+ 'Getting live channel info', 'Unable to get live channel info', fatal=False,
+ headers=self._HEADERS, query={'embed': 'channel'})
+ channel_data = traverse_obj(channel_json, ('data', {
+ 'm3u8_url': ('Channel', 'Custom_live_url'),
+ 'title': 'Name',
+ 'thumbnail': 'Poster_url',
+ }))
+
+ entries.append({
+ 'id': channel_id,
+ 'title': channel_data.get('title'),
+ 'formats': self._extract_m3u8_formats(channel_data.get('m3u8_url'), channel_id, fatal=False),
+ 'is_live': video_type == 'broadcasts',
+ 'thumbnail': url_or_none(channel_data.get('thumbnail')),
+ })
+
+ if not self._is_logged_in and not entries:
+ self.raise_login_required()
+
+ return self.playlist_result(entries, video_id, **event_data)
diff --git a/yt_dlp/extractor/atvat.py b/yt_dlp/extractor/atvat.py
index d6ed9e4..d60feba 100644
--- a/yt_dlp/extractor/atvat.py
+++ b/yt_dlp/extractor/atvat.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
from .common import InfoExtractor
from ..utils import (
@@ -71,9 +71,9 @@ class ATVAtIE(InfoExtractor):
content_ids = [{'id': id, 'subclip_start': content['start'], 'subclip_end': content['end']}
for id, content in enumerate(contentResource)]
- time_of_request = datetime.datetime.now()
- not_before = time_of_request - datetime.timedelta(minutes=5)
- expire = time_of_request + datetime.timedelta(minutes=5)
+ time_of_request = dt.datetime.now()
+ not_before = time_of_request - dt.timedelta(minutes=5)
+ expire = time_of_request + dt.timedelta(minutes=5)
payload = {
'content_ids': {
content_id: content_ids,
diff --git a/yt_dlp/extractor/aws.py b/yt_dlp/extractor/aws.py
index c4741a6..4ebef92 100644
--- a/yt_dlp/extractor/aws.py
+++ b/yt_dlp/extractor/aws.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
import hashlib
import hmac
@@ -12,7 +12,7 @@ class AWSIE(InfoExtractor): # XXX: Conventionally, base classes should end with
def _aws_execute_api(self, aws_dict, video_id, query=None):
query = query or {}
- amz_date = datetime.datetime.now(datetime.timezone.utc).strftime('%Y%m%dT%H%M%SZ')
+ amz_date = dt.datetime.now(dt.timezone.utc).strftime('%Y%m%dT%H%M%SZ')
date = amz_date[:8]
headers = {
'Accept': 'application/json',
diff --git a/yt_dlp/extractor/bibeltv.py b/yt_dlp/extractor/bibeltv.py
index 34464da..666b51c 100644
--- a/yt_dlp/extractor/bibeltv.py
+++ b/yt_dlp/extractor/bibeltv.py
@@ -1,4 +1,4 @@
-from functools import partial
+import functools
from .common import InfoExtractor
from ..utils import (
@@ -50,7 +50,7 @@ class BibelTVBaseIE(InfoExtractor):
**traverse_obj(data, {
'title': 'title',
'description': 'description',
- 'duration': ('duration', {partial(int_or_none, scale=1000)}),
+ 'duration': ('duration', {functools.partial(int_or_none, scale=1000)}),
'timestamp': ('schedulingStart', {parse_iso8601}),
'season_number': 'seasonNumber',
'episode_number': 'episodeNumber',
diff --git a/yt_dlp/extractor/box.py b/yt_dlp/extractor/box.py
index 7281b3c..008c011 100644
--- a/yt_dlp/extractor/box.py
+++ b/yt_dlp/extractor/box.py
@@ -3,6 +3,7 @@ import urllib.parse
from .common import InfoExtractor
from ..utils import (
+ ExtractorError,
parse_iso8601,
update_url_query,
url_or_none,
@@ -11,8 +12,8 @@ from ..utils.traversal import traverse_obj
class BoxIE(InfoExtractor):
- _VALID_URL = r'https?://(?:[^.]+\.)?app\.box\.com/s/(?P<shared_name>[^/?#]+)/file/(?P<id>\d+)'
- _TEST = {
+ _VALID_URL = r'https?://(?:[^.]+\.)?app\.box\.com/s/(?P<shared_name>[^/?#]+)(?:/file/(?P<id>\d+))?'
+ _TESTS = [{
'url': 'https://mlssoccer.app.box.com/s/0evd2o3e08l60lr4ygukepvnkord1o1x/file/510727257538',
'md5': '1f81b2fd3960f38a40a3b8823e5fcd43',
'info_dict': {
@@ -25,14 +26,36 @@ class BoxIE(InfoExtractor):
'uploader_id': '235196876',
},
'params': {'skip_download': 'dash fragment too small'},
- }
+ }, {
+ 'url': 'https://utexas.app.box.com/s/2x6vanv85fdl8j2eqlcxmv0gp1wvps6e',
+ 'info_dict': {
+ 'id': '787379022466',
+ 'ext': 'mp4',
+ 'title': 'Webinar recording: Take the Leap!.mp4',
+ 'uploader': 'Patricia Mosele',
+ 'timestamp': 1615824864,
+ 'upload_date': '20210315',
+ 'uploader_id': '239068974',
+ },
+ 'params': {'skip_download': 'dash fragment too small'},
+ }]
def _real_extract(self, url):
shared_name, file_id = self._match_valid_url(url).groups()
- webpage = self._download_webpage(url, file_id)
- request_token = self._parse_json(self._search_regex(
- r'Box\.config\s*=\s*({.+?});', webpage,
- 'Box config'), file_id)['requestToken']
+ webpage = self._download_webpage(url, file_id or shared_name)
+
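+ # single-file share URLs have no /file/<id> component; resolve the file ID from the shared-item metadata instead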
+ if not file_id:
+ post_stream_data = self._search_json(
+ r'Box\.postStreamData\s*=', webpage, 'Box post-stream data', shared_name)
+ shared_item = traverse_obj(
+ post_stream_data, ('/app-api/enduserapp/shared-item', {dict})) or {}
+ if shared_item.get('itemType') != 'file':
+ raise ExtractorError('The requested resource is not a file', expected=True)
+
+ file_id = str(shared_item['itemID'])
+
+ request_token = self._search_json(
+ r'Box\.config\s*=', webpage, 'Box config', file_id)['requestToken']
access_token = self._download_json(
'https://app.box.com/app-api/enduserapp/elements/tokens', file_id,
'Downloading token JSON metadata',
diff --git a/yt_dlp/extractor/bundestag.py b/yt_dlp/extractor/bundestag.py
index 9fd7c7d..71f7726 100644
--- a/yt_dlp/extractor/bundestag.py
+++ b/yt_dlp/extractor/bundestag.py
@@ -1,5 +1,5 @@
+import functools
import re
-from functools import partial
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
@@ -115,9 +115,9 @@ class BundestagIE(InfoExtractor):
note='Downloading metadata overlay', fatal=False,
), {
'title': (
- {partial(get_element_text_and_html_by_tag, 'h3')}, 0,
- {partial(re.sub, r'<span[^>]*>[^<]+</span>', '')}, {clean_html}),
- 'description': ({partial(get_element_text_and_html_by_tag, 'p')}, 0, {clean_html}),
+ {functools.partial(get_element_text_and_html_by_tag, 'h3')}, 0,
+ {functools.partial(re.sub, r'<span[^>]*>[^<]+</span>', '')}, {clean_html}),
+ 'description': ({functools.partial(get_element_text_and_html_by_tag, 'p')}, 0, {clean_html}),
}))
return result
diff --git a/yt_dlp/extractor/cbc.py b/yt_dlp/extractor/cbc.py
index b5beb1e..ff320dd 100644
--- a/yt_dlp/extractor/cbc.py
+++ b/yt_dlp/extractor/cbc.py
@@ -151,7 +151,7 @@ class CBCIE(InfoExtractor):
class CBCPlayerIE(InfoExtractor):
IE_NAME = 'cbc.ca:player'
- _VALID_URL = r'(?:cbcplayer:|https?://(?:www\.)?cbc\.ca/(?:player/play/|i/caffeine/syndicate/\?mediaId=))(?P<id>\d+)'
+ _VALID_URL = r'(?:cbcplayer:|https?://(?:www\.)?cbc\.ca/(?:player/play/|i/caffeine/syndicate/\?mediaId=))(?P<id>(?:\d\.)?\d+)'
_TESTS = [{
'url': 'http://www.cbc.ca/player/play/2683190193',
'md5': '64d25f841ddf4ddb28a235338af32e2c',
@@ -166,8 +166,51 @@ class CBCPlayerIE(InfoExtractor):
},
'skip': 'Geo-restricted to Canada and no longer available',
}, {
+ 'url': 'http://www.cbc.ca/i/caffeine/syndicate/?mediaId=2657631896',
+ 'md5': 'e5e708c34ae6fca156aafe17c43e8b75',
+ 'info_dict': {
+ 'id': '2657631896',
+ 'ext': 'mp3',
+ 'title': 'CBC Montreal is organizing its first ever community hackathon!',
+ 'description': 'md5:dd3b692f0a139b0369943150bd1c46a9',
+ 'timestamp': 1425704400,
+ 'upload_date': '20150307',
+ 'uploader': 'CBCC-NEW',
+ 'thumbnail': 'http://thumbnails.cbc.ca/maven_legacy/thumbnails/sonali-karnick-220.jpg',
+ 'chapters': [],
+ 'duration': 494.811,
+ 'categories': ['AudioMobile/All in a Weekend Montreal'],
+ 'tags': 'count:8',
+ 'location': 'Quebec',
+ 'series': 'All in a Weekend Montreal',
+ 'season': 'Season 2015',
+ 'season_number': 2015,
+ 'media_type': 'Excerpt',
+ },
+ }, {
+ 'url': 'http://www.cbc.ca/i/caffeine/syndicate/?mediaId=2164402062',
+ 'md5': '33fcd8f6719b9dd60a5e73adcb83b9f6',
+ 'info_dict': {
+ 'id': '2164402062',
+ 'ext': 'mp4',
+ 'title': 'Cancer survivor four times over',
+ 'description': 'Tim Mayer has beaten three different forms of cancer four times in five years.',
+ 'timestamp': 1320410746,
+ 'upload_date': '20111104',
+ 'uploader': 'CBCC-NEW',
+ 'thumbnail': 'https://thumbnails.cbc.ca/maven_legacy/thumbnails/277/67/cancer_852x480_2164412612.jpg',
+ 'chapters': [],
+ 'duration': 186.867,
+ 'series': 'CBC News: Windsor at 6:00',
+ 'categories': ['News/Canada/Windsor'],
+ 'location': 'Windsor',
+ 'tags': ['cancer'],
+ 'creators': ['Allison Johnson'],
+ 'media_type': 'Excerpt',
+ },
+ }, {
# Redirected from http://www.cbc.ca/player/AudioMobile/All%20in%20a%20Weekend%20Montreal/ID/2657632011/
- 'url': 'http://www.cbc.ca/player/play/2657631896',
+ 'url': 'https://www.cbc.ca/player/play/1.2985700',
'md5': 'e5e708c34ae6fca156aafe17c43e8b75',
'info_dict': {
'id': '2657631896',
@@ -189,7 +232,7 @@ class CBCPlayerIE(InfoExtractor):
'media_type': 'Excerpt',
},
}, {
- 'url': 'http://www.cbc.ca/player/play/2164402062',
+ 'url': 'https://www.cbc.ca/player/play/1.1711287',
'md5': '33fcd8f6719b9dd60a5e73adcb83b9f6',
'info_dict': {
'id': '2164402062',
@@ -206,38 +249,53 @@ class CBCPlayerIE(InfoExtractor):
'categories': ['News/Canada/Windsor'],
'location': 'Windsor',
'tags': ['cancer'],
- 'creator': 'Allison Johnson',
+ 'creators': ['Allison Johnson'],
'media_type': 'Excerpt',
},
}, {
# Has subtitles
# These broadcasts expire after ~1 month, can find new test URL here:
# https://www.cbc.ca/player/news/TV%20Shows/The%20National/Latest%20Broadcast
- 'url': 'http://www.cbc.ca/player/play/2284799043667',
- 'md5': '9b49f0839e88b6ec0b01d840cf3d42b5',
+ 'url': 'https://www.cbc.ca/player/play/1.7159484',
+ 'md5': '6ed6cd0fc2ef568d2297ba68a763d455',
'info_dict': {
- 'id': '2284799043667',
+ 'id': '2324213316001',
'ext': 'mp4',
- 'title': 'The National | Hockey coach charged, Green grants, Safer drugs',
- 'description': 'md5:84ef46321c94bcf7d0159bb565d26bfa',
- 'timestamp': 1700272800,
- 'duration': 2718.833,
+ 'title': 'The National | School boards sue social media giants',
+ 'description': 'md5:4b4db69322fa32186c3ce426da07402c',
+ 'timestamp': 1711681200,
+ 'duration': 2743.400,
'subtitles': {'eng': [{'ext': 'vtt', 'protocol': 'm3u8_native'}]},
- 'thumbnail': 'https://thumbnails.cbc.ca/maven_legacy/thumbnails/907/171/thumbnail.jpeg',
+ 'thumbnail': 'https://thumbnails.cbc.ca/maven_legacy/thumbnails/607/559/thumbnail.jpeg',
'uploader': 'CBCC-NEW',
'chapters': 'count:5',
- 'upload_date': '20231118',
+ 'upload_date': '20240329',
'categories': 'count:4',
'series': 'The National - Full Show',
'tags': 'count:1',
- 'creator': 'News',
+ 'creators': ['News'],
'location': 'Canada',
'media_type': 'Full Program',
},
+ }, {
+ 'url': 'cbcplayer:1.7159484',
+ 'only_matching': True,
+ }, {
+ 'url': 'cbcplayer:2164402062',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.cbc.ca/player/play/2657631896',
+ 'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
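+ # IDs like "1.7159484" are page IDs; resolve them to the numeric mediaId via the player page's initial state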
+ if '.' in video_id:
+ webpage = self._download_webpage(f'https://www.cbc.ca/player/play/{video_id}', video_id)
+ video_id = self._search_json(
+ r'window\.__INITIAL_STATE__\s*=', webpage,
+ 'initial state', video_id)['video']['currentClip']['mediaId']
+
return {
'_type': 'url_transparent',
'ie_key': 'ThePlatform',
diff --git a/yt_dlp/extractor/cda.py b/yt_dlp/extractor/cda.py
index 1157114..90b4d08 100644
--- a/yt_dlp/extractor/cda.py
+++ b/yt_dlp/extractor/cda.py
@@ -1,6 +1,6 @@
import base64
import codecs
-import datetime
+import datetime as dt
import hashlib
import hmac
import json
@@ -134,7 +134,7 @@ class CDAIE(InfoExtractor):
self._API_HEADERS['User-Agent'] = f'pl.cda 1.0 (version {app_version}; Android {android_version}; {phone_model})'
cached_bearer = self.cache.load(self._BEARER_CACHE, username) or {}
- if cached_bearer.get('valid_until', 0) > datetime.datetime.now().timestamp() + 5:
+ if cached_bearer.get('valid_until', 0) > dt.datetime.now().timestamp() + 5:
self._API_HEADERS['Authorization'] = f'Bearer {cached_bearer["token"]}'
return
@@ -154,7 +154,7 @@ class CDAIE(InfoExtractor):
})
self.cache.store(self._BEARER_CACHE, username, {
'token': token_res['access_token'],
- 'valid_until': token_res['expires_in'] + datetime.datetime.now().timestamp(),
+ 'valid_until': token_res['expires_in'] + dt.datetime.now().timestamp(),
})
self._API_HEADERS['Authorization'] = f'Bearer {token_res["access_token"]}'
diff --git a/yt_dlp/extractor/common.py b/yt_dlp/extractor/common.py
index e776cca..57bbf9b 100644
--- a/yt_dlp/extractor/common.py
+++ b/yt_dlp/extractor/common.py
@@ -37,6 +37,7 @@ from ..networking.exceptions import (
IncompleteRead,
network_exceptions,
)
+from ..networking.impersonate import ImpersonateTarget
from ..utils import (
IDENTITY,
JSON_LD_RE,
@@ -170,12 +171,12 @@ class InfoExtractor:
Automatically calculated from width and height
* dynamic_range The dynamic range of the video. One of:
"SDR" (None), "HDR10", "HDR10+, "HDR12", "HLG, "DV"
- * tbr Average bitrate of audio and video in KBit/s
- * abr Average audio bitrate in KBit/s
+ * tbr Average bitrate of audio and video in kbps (1000 bits/sec)
+ * abr Average audio bitrate in kbps (1000 bits/sec)
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* audio_channels Number of audio channels
- * vbr Average video bitrate in KBit/s
+ * vbr Average video bitrate in kbps (1000 bits/sec)
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
@@ -246,7 +247,8 @@ class InfoExtractor:
* downloader_options A dictionary of downloader options
(For internal use only)
* http_chunk_size Chunk size for HTTP downloads
- * ffmpeg_args Extra arguments for ffmpeg downloader
+ * ffmpeg_args Extra arguments for ffmpeg downloader (input)
+ * ffmpeg_args_out Extra arguments for ffmpeg downloader (output)
* is_dash_periods Whether the format is a result of merging
multiple DASH periods.
RTMP formats can also have the additional fields: page_url,
@@ -817,7 +819,7 @@ class InfoExtractor:
else:
return err.status in variadic(expected_status)
- def _create_request(self, url_or_request, data=None, headers=None, query=None):
+ def _create_request(self, url_or_request, data=None, headers=None, query=None, extensions=None):
if isinstance(url_or_request, urllib.request.Request):
self._downloader.deprecation_warning(
'Passing a urllib.request.Request to _create_request() is deprecated. '
@@ -826,10 +828,11 @@ class InfoExtractor:
elif not isinstance(url_or_request, Request):
url_or_request = Request(url_or_request)
- url_or_request.update(data=data, headers=headers, query=query)
+ url_or_request.update(data=data, headers=headers, query=query, extensions=extensions)
return url_or_request
- def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers=None, query=None, expected_status=None):
+ def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None,
+ headers=None, query=None, expected_status=None, impersonate=None, require_impersonation=False):
"""
Return the response handle.
@@ -860,8 +863,31 @@ class InfoExtractor:
headers = (headers or {}).copy()
headers.setdefault('X-Forwarded-For', self._x_forwarded_for_ip)
+ extensions = {}
+
+ if impersonate in (True, ''):
+ impersonate = ImpersonateTarget()
+ requested_targets = [
+ t if isinstance(t, ImpersonateTarget) else ImpersonateTarget.from_str(t)
+ for t in variadic(impersonate)
+ ] if impersonate else []
+
+ available_target = next(filter(self._downloader._impersonate_target_available, requested_targets), None)
+ if available_target:
+ extensions['impersonate'] = available_target
+ elif requested_targets:
+ message = 'The extractor is attempting impersonation, but '
+ message += (
+ 'no impersonate target is available' if not str(impersonate)
+ else f'none of these impersonate targets are available: "{", ".join(map(str, requested_targets))}"')
+ info_msg = ('see https://github.com/yt-dlp/yt-dlp#impersonation '
+ 'for information on installing the required dependencies')
+ if require_impersonation:
+ raise ExtractorError(f'{message}; {info_msg}', expected=True)
+ self.report_warning(f'{message}; if you encounter errors, then {info_msg}', only_once=True)
+
try:
- return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
+ return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query, extensions))
except network_exceptions as err:
if isinstance(err, HTTPError):
if self.__can_accept_status_code(err, expected_status):
@@ -880,13 +906,14 @@ class InfoExtractor:
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True,
- encoding=None, data=None, headers={}, query={}, expected_status=None):
+ encoding=None, data=None, headers={}, query={}, expected_status=None,
+ impersonate=None, require_impersonation=False):
"""
Return a tuple (page content as string, URL handle).
Arguments:
url_or_request -- plain text URL as a string or
- a urllib.request.Request object
+ a yt_dlp.networking.Request object
video_id -- Video/playlist/item identifier (string)
Keyword arguments:
@@ -911,13 +938,22 @@ class InfoExtractor:
returning True if it should be accepted
Note that this argument does not affect success status codes (2xx)
which are always accepted.
+ impersonate -- the impersonate target. Can be any of the following entities:
+ - an instance of yt_dlp.networking.impersonate.ImpersonateTarget
+ - a string in the format of CLIENT[:OS]
+ - a list or a tuple of CLIENT[:OS] strings or ImpersonateTarget instances
+ - a boolean value; True means any impersonate target is sufficient
+ require_impersonation -- flag to toggle whether the request should raise an error
+ if impersonation is not possible (bool, default: False)
"""
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, str):
url_or_request = url_or_request.partition('#')[0]
- urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
+ urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data,
+ headers=headers, query=query, expected_status=expected_status,
+ impersonate=impersonate, require_impersonation=require_impersonation)
if urlh is False:
assert not fatal
return False
@@ -1046,17 +1082,20 @@ class InfoExtractor:
return getattr(ie, parser)(content, *args, **kwargs)
def download_handle(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
- fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
+ fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None,
+ impersonate=None, require_impersonation=False):
res = self._download_webpage_handle(
url_or_request, video_id, note=note, errnote=errnote, fatal=fatal, encoding=encoding,
- data=data, headers=headers, query=query, expected_status=expected_status)
+ data=data, headers=headers, query=query, expected_status=expected_status,
+ impersonate=impersonate, require_impersonation=require_impersonation)
if res is False:
return res
content, urlh = res
return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote), urlh
def download_content(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
- fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
+ fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None,
+ impersonate=None, require_impersonation=False):
if self.get_param('load_pages'):
url_or_request = self._create_request(url_or_request, data, headers, query)
filename = self._request_dump_filename(url_or_request.url, video_id)
@@ -1079,6 +1118,8 @@ class InfoExtractor:
'headers': headers,
'query': query,
'expected_status': expected_status,
+ 'impersonate': impersonate,
+ 'require_impersonation': require_impersonation,
}
if parser is None:
kwargs.pop('transform_source')
diff --git a/yt_dlp/extractor/crunchyroll.py b/yt_dlp/extractor/crunchyroll.py
index 8d997de..118b575 100644
--- a/yt_dlp/extractor/crunchyroll.py
+++ b/yt_dlp/extractor/crunchyroll.py
@@ -1,4 +1,5 @@
import base64
+import uuid
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
@@ -7,12 +8,11 @@ from ..utils import (
float_or_none,
format_field,
int_or_none,
- join_nonempty,
+ jwt_decode_hs256,
parse_age_limit,
parse_count,
parse_iso8601,
qualities,
- remove_start,
time_seconds,
traverse_obj,
url_or_none,
@@ -27,6 +27,7 @@ class CrunchyrollBaseIE(InfoExtractor):
_AUTH_HEADERS = None
_API_ENDPOINT = None
_BASIC_AUTH = None
+ _IS_PREMIUM = None
_CLIENT_ID = ('cr_web', 'noaihdevm_6iyg0a8l0q')
_LOCALE_LOOKUP = {
'ar': 'ar-SA',
@@ -84,11 +85,16 @@ class CrunchyrollBaseIE(InfoExtractor):
self.write_debug(f'Using cxApiParam={cx_api_param}')
CrunchyrollBaseIE._BASIC_AUTH = 'Basic ' + base64.b64encode(f'{cx_api_param}:'.encode()).decode()
- grant_type = 'etp_rt_cookie' if self.is_logged_in else 'client_id'
+ auth_headers = {'Authorization': CrunchyrollBaseIE._BASIC_AUTH}
+ if self.is_logged_in:
+ grant_type = 'etp_rt_cookie'
+ else:
+ grant_type = 'client_id'
+ auth_headers['ETP-Anonymous-ID'] = uuid.uuid4()
try:
auth_response = self._download_json(
f'{self._BASE_URL}/auth/v1/token', None, note=f'Authenticating with grant_type={grant_type}',
- headers={'Authorization': CrunchyrollBaseIE._BASIC_AUTH}, data=f'grant_type={grant_type}'.encode())
+ headers=auth_headers, data=f'grant_type={grant_type}'.encode())
except ExtractorError as error:
if isinstance(error.cause, HTTPError) and error.cause.status == 403:
raise ExtractorError(
@@ -97,6 +103,7 @@ class CrunchyrollBaseIE(InfoExtractor):
'and your browser\'s User-Agent (with --user-agent)', expected=True)
raise
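+ # the access token is a JWT; its 'benefits' claim contains 'cr_premium' for premium accounts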
+ CrunchyrollBaseIE._IS_PREMIUM = 'cr_premium' in traverse_obj(auth_response, ('access_token', {jwt_decode_hs256}, 'benefits', ...))
CrunchyrollBaseIE._AUTH_HEADERS = {'Authorization': auth_response['token_type'] + ' ' + auth_response['access_token']}
CrunchyrollBaseIE._AUTH_REFRESH = time_seconds(seconds=traverse_obj(auth_response, ('expires_in', {float_or_none}), default=300) - 10)
@@ -135,62 +142,72 @@ class CrunchyrollBaseIE(InfoExtractor):
raise ExtractorError(f'Unexpected response when downloading {note} JSON')
return result
- def _extract_formats(self, stream_response, display_id=None):
- requested_formats = self._configuration_arg('format') or ['adaptive_hls']
- available_formats = {}
- for stream_type, streams in traverse_obj(
- stream_response, (('streams', ('data', 0)), {dict.items}, ...)):
- if stream_type not in requested_formats:
+ def _extract_chapters(self, internal_id):
+ # if no skip events are available, a 403 xml error is returned
+ skip_events = self._download_json(
+ f'https://static.crunchyroll.com/skip-events/production/{internal_id}.json',
+ internal_id, note='Downloading chapter info', fatal=False, errnote=False)
+ if not skip_events:
+ return None
+
+ chapters = []
+ for event in ('recap', 'intro', 'credits', 'preview'):
+ start = traverse_obj(skip_events, (event, 'start', {float_or_none}))
+ end = traverse_obj(skip_events, (event, 'end', {float_or_none}))
+ # chapters missing a start and/or end time are skipped
+ if start is None or end is None:
continue
- for stream in traverse_obj(streams, lambda _, v: v['url']):
- hardsub_lang = stream.get('hardsub_locale') or ''
- format_id = join_nonempty(stream_type, format_field(stream, 'hardsub_locale', 'hardsub-%s'))
- available_formats[hardsub_lang] = (stream_type, format_id, hardsub_lang, stream['url'])
+ chapters.append({'title': event.capitalize(), 'start_time': start, 'end_time': end})
+
+ return chapters
+
+ def _extract_stream(self, identifier, display_id=None):
+ if not display_id:
+ display_id = identifier
+
+ self._update_auth()
+ stream_response = self._download_json(
+ f'https://cr-play-service.prd.crunchyrollsvc.com/v1/{identifier}/console/switch/play',
+ display_id, note='Downloading stream info', headers=CrunchyrollBaseIE._AUTH_HEADERS)
+
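+ # the empty key holds the raw (non-hardsubbed) stream; hardsubbed variants are keyed by language code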
+ available_formats = {'': ('', '', stream_response['url'])}
+ for hardsub_lang, stream in traverse_obj(stream_response, ('hardSubs', {dict.items}, lambda _, v: v[1]['url'])):
+ available_formats[hardsub_lang] = (f'hardsub-{hardsub_lang}', hardsub_lang, stream['url'])
requested_hardsubs = [('' if val == 'none' else val) for val in (self._configuration_arg('hardsub') or ['none'])]
- if '' in available_formats and 'all' not in requested_hardsubs:
+ hardsub_langs = [lang for lang in available_formats if lang]
+ if hardsub_langs and 'all' not in requested_hardsubs:
full_format_langs = set(requested_hardsubs)
+ self.to_screen(f'Available hardsub languages: {", ".join(hardsub_langs)}')
self.to_screen(
- 'To get all formats of a hardsub language, use '
+ 'To extract formats of a hardsub language, use '
'"--extractor-args crunchyrollbeta:hardsub=<language_code or all>". '
'See https://github.com/yt-dlp/yt-dlp#crunchyrollbeta-crunchyroll for more info',
only_once=True)
else:
full_format_langs = set(map(str.lower, available_formats))
- audio_locale = traverse_obj(stream_response, ((None, 'meta'), 'audio_locale'), get_all=False)
+ audio_locale = traverse_obj(stream_response, ('audioLocale', {str}))
hardsub_preference = qualities(requested_hardsubs[::-1])
- formats = []
- for stream_type, format_id, hardsub_lang, stream_url in available_formats.values():
- if stream_type.endswith('hls'):
- if hardsub_lang.lower() in full_format_langs:
- adaptive_formats = self._extract_m3u8_formats(
- stream_url, display_id, 'mp4', m3u8_id=format_id,
- fatal=False, note=f'Downloading {format_id} HLS manifest')
- else:
- adaptive_formats = (self._m3u8_meta_format(stream_url, ext='mp4', m3u8_id=format_id),)
- elif stream_type.endswith('dash'):
- adaptive_formats = self._extract_mpd_formats(
- stream_url, display_id, mpd_id=format_id,
- fatal=False, note=f'Downloading {format_id} MPD manifest')
+ formats, subtitles = [], {}
+ for format_id, hardsub_lang, stream_url in available_formats.values():
+ if hardsub_lang.lower() in full_format_langs:
+ adaptive_formats, dash_subs = self._extract_mpd_formats_and_subtitles(
+ stream_url, display_id, mpd_id=format_id, headers=CrunchyrollBaseIE._AUTH_HEADERS,
+ fatal=False, note=f'Downloading {f"{format_id} " if hardsub_lang else ""}MPD manifest')
+ self._merge_subtitles(dash_subs, target=subtitles)
else:
- self.report_warning(f'Encountered unknown stream_type: {stream_type!r}', display_id, only_once=True)
- continue
+ continue # XXX: Update this if/when meta mpd formats are working
for f in adaptive_formats:
if f.get('acodec') != 'none':
f['language'] = audio_locale
f['quality'] = hardsub_preference(hardsub_lang.lower())
formats.extend(adaptive_formats)
- return formats
-
- def _extract_subtitles(self, data):
- subtitles = {}
+ for locale, subtitle in traverse_obj(stream_response, (('subtitles', 'captions'), {dict.items}, ...)):
+ subtitles.setdefault(locale, []).append(traverse_obj(subtitle, {'url': 'url', 'ext': 'format'}))
- for locale, subtitle in traverse_obj(data, ((None, 'meta'), 'subtitles', {dict.items}, ...)):
- subtitles[locale] = [traverse_obj(subtitle, {'url': 'url', 'ext': 'format'})]
-
- return subtitles
+ return formats, subtitles
class CrunchyrollCmsBaseIE(CrunchyrollBaseIE):
@@ -245,7 +262,11 @@ class CrunchyrollBetaIE(CrunchyrollCmsBaseIE):
'like_count': int,
'dislike_count': int,
},
- 'params': {'skip_download': 'm3u8', 'format': 'all[format_id~=hardsub]'},
+ 'params': {
+ 'skip_download': 'm3u8',
+ 'extractor_args': {'crunchyrollbeta': {'hardsub': ['de-DE']}},
+ 'format': 'bv[format_id~=hardsub]',
+ },
}, {
# Premium only
'url': 'https://www.crunchyroll.com/watch/GYE5WKQGR',
@@ -306,6 +327,7 @@ class CrunchyrollBetaIE(CrunchyrollCmsBaseIE):
'thumbnail': r're:^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
},
'params': {'skip_download': 'm3u8'},
+ 'skip': 'no longer exists',
}, {
'url': 'https://www.crunchyroll.com/watch/G62PEZ2E6',
'info_dict': {
@@ -359,31 +381,15 @@ class CrunchyrollBetaIE(CrunchyrollCmsBaseIE):
else:
raise ExtractorError(f'Unknown object type {object_type}')
- # There might be multiple audio languages for one object (`<object>_metadata.versions`),
- # so we need to get the id from `streams_link` instead or we dont know which language to choose
- streams_link = response.get('streams_link')
- if not streams_link and traverse_obj(response, (f'{object_type}_metadata', 'is_premium_only')):
+ if not self._IS_PREMIUM and traverse_obj(response, (f'{object_type}_metadata', 'is_premium_only')):
message = f'This {object_type} is for premium members only'
if self.is_logged_in:
raise ExtractorError(message, expected=True)
self.raise_login_required(message)
- # We need go from unsigned to signed api to avoid getting soft banned
- stream_response = self._call_cms_api_signed(remove_start(
- streams_link, '/content/v2/cms/'), internal_id, lang, 'stream info')
- result['formats'] = self._extract_formats(stream_response, internal_id)
- result['subtitles'] = self._extract_subtitles(stream_response)
+ result['formats'], result['subtitles'] = self._extract_stream(internal_id)
- # if no intro chapter is available, a 403 without usable data is returned
- intro_chapter = self._download_json(
- f'https://static.crunchyroll.com/datalab-intro-v2/{internal_id}.json',
- internal_id, note='Downloading chapter info', fatal=False, errnote=False)
- if isinstance(intro_chapter, dict):
- result['chapters'] = [{
- 'title': 'Intro',
- 'start_time': float_or_none(intro_chapter.get('startTime')),
- 'end_time': float_or_none(intro_chapter.get('endTime')),
- }]
+ result['chapters'] = self._extract_chapters(internal_id)
def calculate_count(item):
return parse_count(''.join((item['displayed'], item.get('unit') or '')))
@@ -512,7 +518,7 @@ class CrunchyrollMusicIE(CrunchyrollBaseIE):
'display_id': 'egaono-hana',
'title': 'Egaono Hana',
'track': 'Egaono Hana',
- 'artist': 'Goose house',
+ 'artists': ['Goose house'],
'thumbnail': r're:(?i)^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
'genres': ['J-Pop'],
},
@@ -525,11 +531,12 @@ class CrunchyrollMusicIE(CrunchyrollBaseIE):
'display_id': 'crossing-field',
'title': 'Crossing Field',
'track': 'Crossing Field',
- 'artist': 'LiSA',
+ 'artists': ['LiSA'],
'thumbnail': r're:(?i)^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
'genres': ['Anime'],
},
'params': {'skip_download': 'm3u8'},
+ 'skip': 'no longer exists',
}, {
'url': 'https://www.crunchyroll.com/watch/concert/MC2E2AC135',
'info_dict': {
@@ -538,7 +545,7 @@ class CrunchyrollMusicIE(CrunchyrollBaseIE):
'display_id': 'live-is-smile-always-364joker-at-yokohama-arena',
'title': 'LiVE is Smile Always-364+JOKER- at YOKOHAMA ARENA',
'track': 'LiVE is Smile Always-364+JOKER- at YOKOHAMA ARENA',
- 'artist': 'LiSA',
+ 'artists': ['LiSA'],
'thumbnail': r're:(?i)^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
'description': 'md5:747444e7e6300907b7a43f0a0503072e',
'genres': ['J-Pop'],
@@ -566,16 +573,14 @@ class CrunchyrollMusicIE(CrunchyrollBaseIE):
if not response:
raise ExtractorError(f'No video with id {internal_id} could be found (possibly region locked?)', expected=True)
- streams_link = response.get('streams_link')
- if not streams_link and response.get('isPremiumOnly'):
+ if not self._IS_PREMIUM and response.get('isPremiumOnly'):
message = f'This {response.get("type") or "media"} is for premium members only'
if self.is_logged_in:
raise ExtractorError(message, expected=True)
self.raise_login_required(message)
result = self._transform_music_response(response)
- stream_response = self._call_api(streams_link, internal_id, lang, 'stream info')
- result['formats'] = self._extract_formats(stream_response, internal_id)
+ result['formats'], _ = self._extract_stream(f'music/{internal_id}', internal_id)
return result
@@ -587,7 +592,7 @@ class CrunchyrollMusicIE(CrunchyrollBaseIE):
'display_id': 'slug',
'title': 'title',
'track': 'title',
- 'artist': ('artist', 'name'),
+ 'artists': ('artist', 'name', all),
'description': ('description', {str}, {lambda x: x.replace(r'\r\n', '\n') or None}),
'thumbnails': ('images', ..., ..., {
'url': ('source', {url_or_none}),
@@ -611,7 +616,7 @@ class CrunchyrollArtistIE(CrunchyrollBaseIE):
'info_dict': {
'id': 'MA179CB50D',
'title': 'LiSA',
- 'genres': ['J-Pop', 'Anime', 'Rock'],
+ 'genres': ['Anime', 'J-Pop', 'Rock'],
'description': 'md5:16d87de61a55c3f7d6c454b73285938e',
},
'playlist_mincount': 83,
diff --git a/yt_dlp/extractor/dropbox.py b/yt_dlp/extractor/dropbox.py
index bc2efce..0246975 100644
--- a/yt_dlp/extractor/dropbox.py
+++ b/yt_dlp/extractor/dropbox.py
@@ -65,12 +65,14 @@ class DropboxIE(InfoExtractor):
formats, subtitles, has_anonymous_download = [], {}, False
for encoded in reversed(re.findall(r'registerStreamedPrefetch\s*\(\s*"[\w/+=]+"\s*,\s*"([\w/+=]+)"', webpage)):
decoded = base64.b64decode(encoded).decode('utf-8', 'ignore')
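+ # the anonymous-download marker may appear in a different prefetch blob than the transcode URL, so check every decoded blob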
+ if not has_anonymous_download:
+ has_anonymous_download = self._search_regex(
+ r'(anonymous:\tanonymous)', decoded, 'anonymous', default=False)
transcode_url = self._search_regex(
r'\n.(https://[^\x03\x08\x12\n]+\.m3u8)', decoded, 'transcode url', default=None)
if not transcode_url:
continue
formats, subtitles = self._extract_m3u8_formats_and_subtitles(transcode_url, video_id, 'mp4')
- has_anonymous_download = self._search_regex(r'(anonymous:\tanonymous)', decoded, 'anonymous', default=False)
break
# downloads enabled we can get the original file
diff --git a/yt_dlp/extractor/dtube.py b/yt_dlp/extractor/dtube.py
index bb06c42..5ea014c 100644
--- a/yt_dlp/extractor/dtube.py
+++ b/yt_dlp/extractor/dtube.py
@@ -1,5 +1,5 @@
import json
-from socket import timeout
+import socket
from .common import InfoExtractor
from ..utils import (
@@ -56,7 +56,7 @@ class DTubeIE(InfoExtractor):
try:
self.to_screen('%s: Checking %s video format URL' % (video_id, format_id))
self._downloader._opener.open(video_url, timeout=5).close()
- except timeout:
+ except socket.timeout:
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, format_id))
continue
diff --git a/yt_dlp/extractor/fathom.py b/yt_dlp/extractor/fathom.py
new file mode 100644
index 0000000..1df7d96
--- /dev/null
+++ b/yt_dlp/extractor/fathom.py
@@ -0,0 +1,54 @@
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+ extract_attributes,
+ float_or_none,
+ get_element_html_by_id,
+ parse_iso8601,
+)
+from ..utils.traversal import traverse_obj
+
+
+class FathomIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?fathom\.video/share/(?P<id>[^/?#&]+)'
+ _TESTS = [{
+ 'url': 'https://fathom.video/share/G9mkjkspnohVVZ_L5nrsoPycyWcB8y7s',
+ 'md5': '0decd5343b8f30ae268625e79a02b60f',
+ 'info_dict': {
+ 'id': '47200596',
+ 'ext': 'mp4',
+ 'title': 'eCom Inucbator - Coaching Session',
+ 'duration': 8125.380507,
+ 'timestamp': 1699048914,
+ 'upload_date': '20231103',
+ },
+ }, {
+ 'url': 'https://fathom.video/share/mEws3bybftHL2QLymxYEDeE21vtLxGVm',
+ 'md5': '4f5cb382126c22d1aba8a939f9c49690',
+ 'info_dict': {
+ 'id': '46812957',
+ 'ext': 'mp4',
+ 'title': 'Jon, Lawrence, Neman chat about practice',
+ 'duration': 3571.517847,
+ 'timestamp': 1698933600,
+ 'upload_date': '20231102',
+ },
+ }]
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
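+ # page props are embedded as JSON in the data-page attribute of the #app element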
+ props = traverse_obj(
+ get_element_html_by_id('app', webpage), ({extract_attributes}, 'data-page', {json.loads}, 'props'))
+ video_id = str(props['call']['id'])
+
+ return {
+ 'id': video_id,
+ 'formats': self._extract_m3u8_formats(props['call']['video_url'], video_id, 'mp4'),
+ **traverse_obj(props, {
+ 'title': ('head', 'title', {str}),
+ 'duration': ('duration', {float_or_none}),
+ 'timestamp': ('call', 'started_at', {parse_iso8601}),
+ }),
+ }
diff --git a/yt_dlp/extractor/generic.py b/yt_dlp/extractor/generic.py
index 9d82515..2cfed0f 100644
--- a/yt_dlp/extractor/generic.py
+++ b/yt_dlp/extractor/generic.py
@@ -2105,22 +2105,6 @@ class GenericIE(InfoExtractor):
},
},
{
- 'note': 'JW Player embed with unicode-escape sequences in URL',
- 'url': 'https://www.medici.tv/en/concerts/lahav-shani-mozart-mahler-israel-philharmonic-abu-dhabi-classics',
- 'info_dict': {
- 'id': 'm',
- 'ext': 'mp4',
- 'title': 'Lahav Shani conducts the Israel Philharmonic\'s first-ever concert in Abu Dhabi',
- 'description': 'Mahler\'s ',
- 'uploader': 'www.medici.tv',
- 'age_limit': 0,
- 'thumbnail': r're:^https?://.+\.jpg',
- },
- 'params': {
- 'skip_download': True,
- },
- },
- {
'url': 'https://shooshtime.com/videos/284002/just-out-of-the-shower-joi/',
'md5': 'e2f0a4c329f7986280b7328e24036d60',
'info_dict': {
diff --git a/yt_dlp/extractor/gofile.py b/yt_dlp/extractor/gofile.py
index eb1dcf8..c6eca0c 100644
--- a/yt_dlp/extractor/gofile.py
+++ b/yt_dlp/extractor/gofile.py
@@ -58,21 +58,18 @@ class GofileIE(InfoExtractor):
return
account_data = self._download_json(
- 'https://api.gofile.io/createAccount', None, note='Getting a new guest account')
+ 'https://api.gofile.io/accounts', None, 'Getting a new guest account', data=b'{}')
self._TOKEN = account_data['data']['token']
self._set_cookie('.gofile.io', 'accountToken', self._TOKEN)
def _entries(self, file_id):
- query_params = {
- 'contentId': file_id,
- 'token': self._TOKEN,
- 'wt': '4fd6sg89d7s6', # From https://gofile.io/dist/js/alljs.js
- }
+ query_params = {'wt': '4fd6sg89d7s6'} # From https://gofile.io/dist/js/alljs.js
password = self.get_param('videopassword')
if password:
query_params['password'] = hashlib.sha256(password.encode('utf-8')).hexdigest()
files = self._download_json(
- 'https://api.gofile.io/getContent', file_id, note='Getting filelist', query=query_params)
+ f'https://api.gofile.io/contents/{file_id}', file_id, 'Getting filelist',
+ query=query_params, headers={'Authorization': f'Bearer {self._TOKEN}'})
status = files['status']
if status == 'error-passwordRequired':
@@ -82,7 +79,7 @@ class GofileIE(InfoExtractor):
raise ExtractorError(f'{self.IE_NAME} said: status {status}', expected=True)
found_files = False
- for file in (try_get(files, lambda x: x['data']['contents'], dict) or {}).values():
+ for file in (try_get(files, lambda x: x['data']['children'], dict) or {}).values():
file_type, file_format = file.get('mimetype').split('/', 1)
if file_type not in ('video', 'audio') and file_format != 'vnd.mts':
continue
diff --git a/yt_dlp/extractor/goplay.py b/yt_dlp/extractor/goplay.py
index 74aad11..7a98e0f 100644
--- a/yt_dlp/extractor/goplay.py
+++ b/yt_dlp/extractor/goplay.py
@@ -1,6 +1,6 @@
import base64
import binascii
-import datetime
+import datetime as dt
import hashlib
import hmac
import json
@@ -422,7 +422,7 @@ class AwsIdp:
months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
- time_now = datetime.datetime.now(datetime.timezone.utc)
+ time_now = dt.datetime.now(dt.timezone.utc)
format_string = "{} {} {} %H:%M:%S UTC %Y".format(days[time_now.weekday()], months[time_now.month], time_now.day)
time_string = time_now.strftime(format_string)
return time_string
diff --git a/yt_dlp/extractor/imgur.py b/yt_dlp/extractor/imgur.py
index 1fa0a2a..f32c116 100644
--- a/yt_dlp/extractor/imgur.py
+++ b/yt_dlp/extractor/imgur.py
@@ -76,6 +76,23 @@ class ImgurIE(ImgurBaseIE):
'thumbnail': 'https://i.imgur.com/jxBXAMCh.jpg',
'dislike_count': int,
},
+ }, {
+ # needs Accept header, ref: https://github.com/yt-dlp/yt-dlp/issues/9458
+ 'url': 'https://imgur.com/zV03bd5',
+ 'md5': '59df97884e8ba76143ff6b640a0e2904',
+ 'info_dict': {
+ 'id': 'zV03bd5',
+ 'ext': 'mp4',
+ 'title': 'Ive - Liz',
+ 'timestamp': 1710491255,
+ 'upload_date': '20240315',
+ 'like_count': int,
+ 'dislike_count': int,
+ 'duration': 56.92,
+ 'comment_count': int,
+ 'release_timestamp': 1710491255,
+ 'release_date': '20240315',
+ },
}]
def _real_extract(self, url):
@@ -192,6 +209,7 @@ class ImgurIE(ImgurBaseIE):
'id': video_id,
'formats': formats,
'thumbnail': url_or_none(search('thumbnailUrl')),
+ 'http_headers': {'Accept': '*/*'},
}
diff --git a/yt_dlp/extractor/jiosaavn.py b/yt_dlp/extractor/jiosaavn.py
index a592098..d7f0a2d 100644
--- a/yt_dlp/extractor/jiosaavn.py
+++ b/yt_dlp/extractor/jiosaavn.py
@@ -1,89 +1,125 @@
+import functools
+
from .common import InfoExtractor
from ..utils import (
+ format_field,
int_or_none,
js_to_json,
+ make_archive_id,
+ smuggle_url,
+ unsmuggle_url,
+ url_basename,
url_or_none,
urlencode_postdata,
- urljoin,
)
from ..utils.traversal import traverse_obj
class JioSaavnBaseIE(InfoExtractor):
- def _extract_initial_data(self, url, audio_id):
- webpage = self._download_webpage(url, audio_id)
+ _VALID_BITRATES = {'16', '32', '64', '128', '320'}
+
+ @functools.cached_property
+ def requested_bitrates(self):
+ requested_bitrates = self._configuration_arg('bitrate', ['128', '320'], ie_key='JioSaavn')
+ if invalid_bitrates := set(requested_bitrates) - self._VALID_BITRATES:
+ raise ValueError(
+ f'Invalid bitrate(s): {", ".join(invalid_bitrates)}. '
+ + f'Valid bitrates are: {", ".join(sorted(self._VALID_BITRATES, key=int))}')
+ return requested_bitrates
+
+ def _extract_formats(self, song_data):
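+ # each requested bitrate needs its own song.generateAuthToken call against the song's encrypted media URL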
+ for bitrate in self.requested_bitrates:
+ media_data = self._download_json(
+ 'https://www.jiosaavn.com/api.php', song_data['id'],
+ f'Downloading format info for {bitrate}',
+ fatal=False, data=urlencode_postdata({
+ '__call': 'song.generateAuthToken',
+ '_format': 'json',
+ 'bitrate': bitrate,
+ 'url': song_data['encrypted_media_url'],
+ }))
+ if not traverse_obj(media_data, ('auth_url', {url_or_none})):
+ self.report_warning(f'Unable to extract format info for {bitrate}')
+ continue
+ ext = media_data.get('type')
+ yield {
+ 'url': media_data['auth_url'],
+ 'ext': 'm4a' if ext == 'mp4' else ext,
+ 'format_id': bitrate,
+ 'abr': int(bitrate),
+ 'vcodec': 'none',
+ }
+
+ def _extract_song(self, song_data):
+ info = traverse_obj(song_data, {
+ 'id': ('id', {str}),
+ 'title': ('title', 'text', {str}),
+ 'album': ('album', 'text', {str}),
+ 'thumbnail': ('image', 0, {url_or_none}),
+ 'duration': ('duration', {int_or_none}),
+ 'view_count': ('play_count', {int_or_none}),
+ 'release_year': ('year', {int_or_none}),
+ 'artists': ('artists', lambda _, v: v['role'] == 'singer', 'name', {str}),
+ 'webpage_url': ('perma_url', {url_or_none}), # for song, playlist extraction
+ })
+ if not info.get('webpage_url'): # for album extraction / fallback
+ info['webpage_url'] = format_field(
+ song_data, [('title', 'action')], 'https://www.jiosaavn.com%s') or None
+ if webpage_url := info['webpage_url']:
+ info['_old_archive_ids'] = [make_archive_id(JioSaavnSongIE, url_basename(webpage_url))]
+
+ return info
+
+ def _extract_initial_data(self, url, display_id):
+ webpage = self._download_webpage(url, display_id)
return self._search_json(
r'window\.__INITIAL_DATA__\s*=', webpage,
- 'init json', audio_id, transform_source=js_to_json)
+ 'initial data', display_id, transform_source=js_to_json)
class JioSaavnSongIE(JioSaavnBaseIE):
+ IE_NAME = 'jiosaavn:song'
_VALID_URL = r'https?://(?:www\.)?(?:jiosaavn\.com/song/[^/?#]+/|saavn\.com/s/song/(?:[^/?#]+/){3})(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.jiosaavn.com/song/leja-re/OQsEfQFVUXk',
'md5': '3b84396d15ed9e083c3106f1fa589c04',
'info_dict': {
- 'id': 'OQsEfQFVUXk',
- 'ext': 'mp4',
+ 'id': 'IcoLuefJ',
+ 'ext': 'm4a',
'title': 'Leja Re',
'album': 'Leja Re',
'thumbnail': 'https://c.saavncdn.com/258/Leja-Re-Hindi-2018-20181124024539-500x500.jpg',
'duration': 205,
'view_count': int,
'release_year': 2018,
+ 'artists': ['Sandesh Shandilya', 'Dhvani Bhanushali', 'Tanishk Bagchi'],
+ '_old_archive_ids': ['jiosaavnsong OQsEfQFVUXk'],
},
}, {
'url': 'https://www.saavn.com/s/song/hindi/Saathiya/O-Humdum-Suniyo-Re/KAMiazoCblU',
'only_matching': True,
}]
- _VALID_BITRATES = ('16', '32', '64', '128', '320')
-
def _real_extract(self, url):
- audio_id = self._match_id(url)
- extract_bitrates = self._configuration_arg('bitrate', ['128', '320'], ie_key='JioSaavn')
- if invalid_bitrates := [br for br in extract_bitrates if br not in self._VALID_BITRATES]:
- raise ValueError(
- f'Invalid bitrate(s): {", ".join(invalid_bitrates)}. '
- + f'Valid bitrates are: {", ".join(self._VALID_BITRATES)}')
+ url, smuggled_data = unsmuggle_url(url)
+ song_data = traverse_obj(smuggled_data, ({
+ 'id': ('id', {str}),
+ 'encrypted_media_url': ('encrypted_media_url', {str}),
+ }))
- song_data = self._extract_initial_data(url, audio_id)['song']['song']
- formats = []
- for bitrate in extract_bitrates:
- media_data = self._download_json(
- 'https://www.jiosaavn.com/api.php', audio_id, f'Downloading format info for {bitrate}',
- fatal=False, data=urlencode_postdata({
- '__call': 'song.generateAuthToken',
- '_format': 'json',
- 'bitrate': bitrate,
- 'url': song_data['encrypted_media_url'],
- }))
- if not media_data.get('auth_url'):
- self.report_warning(f'Unable to extract format info for {bitrate}')
- continue
- formats.append({
- 'url': media_data['auth_url'],
- 'ext': media_data.get('type'),
- 'format_id': bitrate,
- 'abr': int(bitrate),
- 'vcodec': 'none',
- })
+ if 'id' in song_data and 'encrypted_media_url' in song_data:
+ result = {'id': song_data['id']}
+ else:
+ # only extract metadata if this is not a url_transparent result
+ song_data = self._extract_initial_data(url, self._match_id(url))['song']['song']
+ result = self._extract_song(song_data)
- return {
- 'id': audio_id,
- 'formats': formats,
- **traverse_obj(song_data, {
- 'title': ('title', 'text'),
- 'album': ('album', 'text'),
- 'thumbnail': ('image', 0, {url_or_none}),
- 'duration': ('duration', {int_or_none}),
- 'view_count': ('play_count', {int_or_none}),
- 'release_year': ('year', {int_or_none}),
- }),
- }
+ result['formats'] = list(self._extract_formats(song_data))
+ return result
class JioSaavnAlbumIE(JioSaavnBaseIE):
+ IE_NAME = 'jiosaavn:album'
_VALID_URL = r'https?://(?:www\.)?(?:jio)?saavn\.com/album/[^/?#]+/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.jiosaavn.com/album/96/buIOjYZDrNA_',
@@ -94,12 +130,45 @@ class JioSaavnAlbumIE(JioSaavnBaseIE):
'playlist_count': 10,
}]
+ def _entries(self, playlist_data):
+ for song_data in traverse_obj(playlist_data, (
+ 'modules', lambda _, x: x['key'] == 'list', 'data', lambda _, v: v['title']['action'])):
+ song_info = self._extract_song(song_data)
+ # album song data lacks artists and release_year, so full metadata is re-extracted per song
+ yield self.url_result(song_info['webpage_url'], JioSaavnSongIE, **song_info)
+
def _real_extract(self, url):
- album_id = self._match_id(url)
- album_view = self._extract_initial_data(url, album_id)['albumView']
-
- return self.playlist_from_matches(
- traverse_obj(album_view, (
- 'modules', lambda _, x: x['key'] == 'list', 'data', ..., 'title', 'action', {str})),
- album_id, traverse_obj(album_view, ('album', 'title', 'text', {str})), ie=JioSaavnSongIE,
- getter=lambda x: urljoin('https://www.jiosaavn.com/', x))
+ display_id = self._match_id(url)
+ album_data = self._extract_initial_data(url, display_id)['albumView']
+
+ return self.playlist_result(
+ self._entries(album_data), display_id, traverse_obj(album_data, ('album', 'title', 'text', {str})))
+
+
+class JioSaavnPlaylistIE(JioSaavnBaseIE):
+ IE_NAME = 'jiosaavn:playlist'
+ _VALID_URL = r'https?://(?:www\.)?(?:jio)?saavn\.com/s/playlist/(?:[^/?#]+/){2}(?P<id>[^/?#]+)'
+ _TESTS = [{
+ 'url': 'https://www.jiosaavn.com/s/playlist/2279fbe391defa793ad7076929a2f5c9/mood-english/LlJ8ZWT1ibN5084vKHRj2Q__',
+ 'info_dict': {
+ 'id': 'LlJ8ZWT1ibN5084vKHRj2Q__',
+ 'title': 'Mood English',
+ },
+ 'playlist_mincount': 50,
+ }]
+
+ def _entries(self, playlist_data):
+ for song_data in traverse_obj(playlist_data, ('list', lambda _, v: v['perma_url'])):
+ song_info = self._extract_song(song_data)
+ url = smuggle_url(song_info['webpage_url'], {
+ 'id': song_data['id'],
+ 'encrypted_media_url': song_data['encrypted_media_url'],
+ })
+ yield self.url_result(url, JioSaavnSongIE, url_transparent=True, **song_info)
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ playlist_data = self._extract_initial_data(url, display_id)['playlist']['playlist']
+
+ return self.playlist_result(
+ self._entries(playlist_data), display_id, traverse_obj(playlist_data, ('title', 'text', {str})))
diff --git a/yt_dlp/extractor/joqrag.py b/yt_dlp/extractor/joqrag.py
index 3bb28af..7a91d4a 100644
--- a/yt_dlp/extractor/joqrag.py
+++ b/yt_dlp/extractor/joqrag.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
import urllib.parse
from .common import InfoExtractor
@@ -50,8 +50,8 @@ class JoqrAgIE(InfoExtractor):
def _extract_start_timestamp(self, video_id, is_live):
def extract_start_time_from(date_str):
- dt = datetime_from_str(date_str) + datetime.timedelta(hours=9)
- date = dt.strftime('%Y%m%d')
+ dt_ = datetime_from_str(date_str) + dt.timedelta(hours=9)
+ date = dt_.strftime('%Y%m%d')
start_time = self._search_regex(
r'<h3[^>]+\bclass="dailyProgram-itemHeaderTime"[^>]*>[\s\d:]+–\s*(\d{1,2}:\d{1,2})',
self._download_webpage(
@@ -60,7 +60,7 @@ class JoqrAgIE(InfoExtractor):
errnote=f'Failed to download program list of {date}') or '',
'start time', default=None)
if start_time:
- return unified_timestamp(f'{dt.strftime("%Y/%m/%d")} {start_time} +09:00')
+ return unified_timestamp(f'{dt_.strftime("%Y/%m/%d")} {start_time} +09:00')
return None
start_timestamp = extract_start_time_from('today')
@@ -80,14 +80,14 @@ class JoqrAgIE(InfoExtractor):
note='Downloading metadata', errnote='Failed to download metadata')
title = self._extract_metadata('Program_name', metadata)
- if title == '放送休止':
+ if not title or title == '放送休止':
formats = []
live_status = 'is_upcoming'
release_timestamp = self._extract_start_timestamp(video_id, False)
msg = 'This stream is not currently live'
if release_timestamp:
msg += (' and will start at '
- + datetime.datetime.fromtimestamp(release_timestamp).strftime('%Y-%m-%d %H:%M:%S'))
+ + dt.datetime.fromtimestamp(release_timestamp).strftime('%Y-%m-%d %H:%M:%S'))
self.raise_no_formats(msg, expected=True)
else:
m3u8_path = self._search_regex(
diff --git a/yt_dlp/extractor/kick.py b/yt_dlp/extractor/kick.py
index d124372..889548f 100644
--- a/yt_dlp/extractor/kick.py
+++ b/yt_dlp/extractor/kick.py
@@ -13,7 +13,8 @@ from ..utils import (
class KickBaseIE(InfoExtractor):
def _real_initialize(self):
- self._request_webpage(HEADRequest('https://kick.com/'), None, 'Setting up session', fatal=False)
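+ # impersonate a browser when a target is available; plain requests may be rejected (a warning is emitted if impersonation is unavailable)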
+ self._request_webpage(
+ HEADRequest('https://kick.com/'), None, 'Setting up session', fatal=False, impersonate=True)
xsrf_token = self._get_cookies('https://kick.com/').get('XSRF-TOKEN')
if not xsrf_token:
self.write_debug('kick.com did not set XSRF-TOKEN cookie')
@@ -25,7 +26,7 @@ class KickBaseIE(InfoExtractor):
def _call_api(self, path, display_id, note='Downloading API JSON', headers={}, **kwargs):
return self._download_json(
f'https://kick.com/api/v1/{path}', display_id, note=note,
- headers=merge_dicts(headers, self._API_HEADERS), **kwargs)
+ headers=merge_dicts(headers, self._API_HEADERS), impersonate=True, **kwargs)
class KickIE(KickBaseIE):
@@ -82,26 +83,27 @@ class KickIE(KickBaseIE):
class KickVODIE(KickBaseIE):
_VALID_URL = r'https?://(?:www\.)?kick\.com/video/(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})'
_TESTS = [{
- 'url': 'https://kick.com/video/54244b5e-050a-4df4-a013-b2433dafbe35',
- 'md5': '73691206a6a49db25c5aa1588e6538fc',
+ 'url': 'https://kick.com/video/58bac65b-e641-4476-a7ba-3707a35e60e3',
+ 'md5': '3870f94153e40e7121a6e46c068b70cb',
'info_dict': {
- 'id': '54244b5e-050a-4df4-a013-b2433dafbe35',
+ 'id': '58bac65b-e641-4476-a7ba-3707a35e60e3',
'ext': 'mp4',
- 'title': 'Making 710-carBoosting. Kinda No Pixel inspired. !guilded - !links',
- 'description': 'md5:a0d3546bf7955d0a8252ffe0fd6f518f',
- 'channel': 'kmack710',
- 'channel_id': '16278',
- 'uploader': 'Kmack710',
- 'uploader_id': '16412',
- 'upload_date': '20221206',
- 'timestamp': 1670318289,
- 'duration': 40104.0,
+ 'title': '🤠REBIRTH IS BACK!!!!🤠!stake CODE JAREDFPS 🤠',
+ 'description': 'md5:02b0c46f9b4197fb545ab09dddb85b1d',
+ 'channel': 'jaredfps',
+ 'channel_id': '26608',
+ 'uploader': 'JaredFPS',
+ 'uploader_id': '26799',
+ 'upload_date': '20240402',
+ 'timestamp': 1712097108,
+ 'duration': 33859.0,
'thumbnail': r're:^https?://.*\.jpg',
- 'categories': ['Grand Theft Auto V'],
+ 'categories': ['Call of Duty: Warzone'],
},
'params': {
'skip_download': 'm3u8',
},
+ 'expected_warnings': [r'impersonation'],
}]
def _real_extract(self, url):
diff --git a/yt_dlp/extractor/leeco.py b/yt_dlp/extractor/leeco.py
index 85033b8..5d61a60 100644
--- a/yt_dlp/extractor/leeco.py
+++ b/yt_dlp/extractor/leeco.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
import hashlib
import re
import time
@@ -185,7 +185,7 @@ class LeIE(InfoExtractor):
publish_time = parse_iso8601(self._html_search_regex(
r'发布时间&nbsp;([^<>]+) ', page, 'publish time', default=None),
- delimiter=' ', timezone=datetime.timedelta(hours=8))
+ delimiter=' ', timezone=dt.timedelta(hours=8))
description = self._html_search_meta('description', page, fatal=False)
return {
diff --git a/yt_dlp/extractor/linkedin.py b/yt_dlp/extractor/linkedin.py
index ad41c0e..e12f467 100644
--- a/yt_dlp/extractor/linkedin.py
+++ b/yt_dlp/extractor/linkedin.py
@@ -1,4 +1,4 @@
-from itertools import zip_longest
+import itertools
import re
from .common import InfoExtractor
@@ -156,7 +156,7 @@ class LinkedInLearningIE(LinkedInLearningBaseIE):
def json2srt(self, transcript_lines, duration=None):
srt_data = ''
- for line, (line_dict, next_dict) in enumerate(zip_longest(transcript_lines, transcript_lines[1:])):
+ for line, (line_dict, next_dict) in enumerate(itertools.zip_longest(transcript_lines, transcript_lines[1:])):
start_time, caption = line_dict['transcriptStartAt'] / 1000, line_dict['caption']
end_time = next_dict['transcriptStartAt'] / 1000 if next_dict else duration or start_time + 1
srt_data += '%d\n%s --> %s\n%s\n\n' % (line + 1, srt_subtitles_timecode(start_time),
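For context on the linkedin.py change above: json2srt() pairs each caption with the one that follows it via itertools.zip_longest, so a cue's end time is the next cue's start time. A rough self-contained sketch of that pairing, assuming the same 'transcriptStartAt'/'caption' keys; the _timecode() helper here is a simplified stand-in for yt-dlp's srt_subtitles_timecode():

import itertools

def _timecode(seconds):
    # Simplified stand-in for srt_subtitles_timecode()
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(int(minutes), 60)
    return f'{hours:02d}:{minutes:02d}:{secs:06.3f}'.replace('.', ',')

def captions_to_srt(captions, duration=None):
    # zip_longest pads the final pair with None, so the last cue falls back
    # to the total duration or start + 1 second, as in json2srt() above.
    srt = ''
    for i, (cur, nxt) in enumerate(itertools.zip_longest(captions, captions[1:])):
        start = cur['transcriptStartAt'] / 1000
        end = nxt['transcriptStartAt'] / 1000 if nxt else (duration or start + 1)
        srt += f'{i + 1}\n{_timecode(start)} --> {_timecode(end)}\n{cur["caption"]}\n\n'
    return srt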
diff --git a/yt_dlp/extractor/loom.py b/yt_dlp/extractor/loom.py
new file mode 100644
index 0000000..1191aa1
--- /dev/null
+++ b/yt_dlp/extractor/loom.py
@@ -0,0 +1,461 @@
+import json
+import textwrap
+import urllib.parse
+import uuid
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ determine_ext,
+ filter_dict,
+ get_first,
+ int_or_none,
+ parse_iso8601,
+ update_url,
+ url_or_none,
+ variadic,
+)
+from ..utils.traversal import traverse_obj
+
+
+class LoomIE(InfoExtractor):
+ IE_NAME = 'loom'
+ _VALID_URL = r'https?://(?:www\.)?loom\.com/(?:share|embed)/(?P<id>[\da-f]{32})'
+ _EMBED_REGEX = [rf'<iframe[^>]+\bsrc=["\'](?P<url>{_VALID_URL})']
+ _TESTS = [{
+ # m3u8 raw-url, mp4 transcoded-url, cdn url == raw-url, json subs only
+ 'url': 'https://www.loom.com/share/43d05f362f734614a2e81b4694a3a523',
+ 'md5': 'bfc2d7e9c2e0eb4813212230794b6f42',
+ 'info_dict': {
+ 'id': '43d05f362f734614a2e81b4694a3a523',
+ 'ext': 'mp4',
+ 'title': 'A Ruler for Windows - 28 March 2022',
+ 'uploader': 'wILLIAM PIP',
+ 'upload_date': '20220328',
+ 'timestamp': 1648454238,
+ 'duration': 27,
+ },
+ }, {
+ # webm raw-url, mp4 transcoded-url, cdn url == transcoded-url, no subs
+ 'url': 'https://www.loom.com/share/c43a642f815f4378b6f80a889bb73d8d',
+ 'md5': '70f529317be8cf880fcc2c649a531900',
+ 'info_dict': {
+ 'id': 'c43a642f815f4378b6f80a889bb73d8d',
+ 'ext': 'webm',
+ 'title': 'Lilah Nielsen Intro Video',
+ 'uploader': 'Lilah Nielsen',
+ 'upload_date': '20200826',
+ 'timestamp': 1598480716,
+ 'duration': 20,
+ },
+ }, {
+ # m3u8 raw-url, mp4 transcoded-url, cdn url == raw-url, vtt sub and json subs
+ 'url': 'https://www.loom.com/share/9458bcbf79784162aa62ffb8dd66201b',
+ 'md5': '51737ec002969dd28344db4d60b9cbbb',
+ 'info_dict': {
+ 'id': '9458bcbf79784162aa62ffb8dd66201b',
+ 'ext': 'mp4',
+ 'title': 'Sharing screen with gpt-4',
+ 'description': 'Sharing screen with GPT 4 vision model and asking questions to guide through blender.',
+ 'uploader': 'Suneel Matham',
+ 'chapters': 'count:3',
+ 'upload_date': '20231109',
+ 'timestamp': 1699518978,
+ 'duration': 93,
+ },
+ }, {
+ # mpd raw-url, mp4 transcoded-url, cdn url == raw-url, no subs
+ 'url': 'https://www.loom.com/share/24351eb8b317420289b158e4b7e96ff2',
+ 'info_dict': {
+ 'id': '24351eb8b317420289b158e4b7e96ff2',
+ 'ext': 'webm',
+ 'title': 'OMFG clown',
+ 'description': 'md5:285c5ee9d62aa087b7e3271b08796815',
+ 'uploader': 'MrPumkin B',
+ 'upload_date': '20210924',
+ 'timestamp': 1632519618,
+ 'duration': 210,
+ },
+ 'params': {'skip_download': 'dash'},
+ }, {
+ # password-protected
+ 'url': 'https://www.loom.com/share/50e26e8aeb7940189dff5630f95ce1f4',
+ 'md5': '5cc7655e7d55d281d203f8ffd14771f7',
+ 'info_dict': {
+ 'id': '50e26e8aeb7940189dff5630f95ce1f4',
+ 'ext': 'mp4',
+ 'title': 'iOS Mobile Upload',
+ 'uploader': 'Simon Curran',
+ 'upload_date': '20200520',
+ 'timestamp': 1590000123,
+ 'duration': 35,
+ },
+ 'params': {'videopassword': 'seniorinfants2'},
+ }, {
+ # embed, transcoded-url endpoint sends empty JSON response
+ 'url': 'https://www.loom.com/embed/ddcf1c1ad21f451ea7468b1e33917e4e',
+ 'md5': '8488817242a0db1cb2ad0ea522553cf6',
+ 'info_dict': {
+ 'id': 'ddcf1c1ad21f451ea7468b1e33917e4e',
+ 'ext': 'mp4',
+ 'title': 'CF Reset User\'s Password',
+ 'uploader': 'Aimee Heintz',
+ 'upload_date': '20220707',
+ 'timestamp': 1657216459,
+ 'duration': 181,
+ },
+ 'expected_warnings': ['Failed to parse JSON'],
+ }]
+ _WEBPAGE_TESTS = [{
+ 'url': 'https://www.loom.com/community/e1229802a8694a09909e8ba0fbb6d073-pg',
+ 'md5': 'ec838cd01b576cf0386f32e1ae424609',
+ 'info_dict': {
+ 'id': 'e1229802a8694a09909e8ba0fbb6d073',
+ 'ext': 'mp4',
+ 'title': 'Rexie Jane Cimafranca - Founder\'s Presentation',
+ 'uploader': 'Rexie Cimafranca',
+ 'upload_date': '20230213',
+ 'duration': 247,
+ 'timestamp': 1676274030,
+ },
+ }]
+
+ _GRAPHQL_VARIABLES = {
+ 'GetVideoSource': {
+ 'acceptableMimes': ['DASH', 'M3U8', 'MP4'],
+ },
+ }
+ _GRAPHQL_QUERIES = {
+ 'GetVideoSSR': textwrap.dedent('''\
+ query GetVideoSSR($videoId: ID!, $password: String) {
+ getVideo(id: $videoId, password: $password) {
+ __typename
+ ... on PrivateVideo {
+ id
+ status
+ message
+ __typename
+ }
+ ... on VideoPasswordMissingOrIncorrect {
+ id
+ message
+ __typename
+ }
+ ... on RegularUserVideo {
+ id
+ __typename
+ createdAt
+ description
+ download_enabled
+ folder_id
+ is_protected
+ needs_password
+ owner {
+ display_name
+ __typename
+ }
+ privacy
+ s3_id
+ name
+ video_properties {
+ avgBitRate
+ client
+ camera_enabled
+ client_version
+ duration
+ durationMs
+ format
+ height
+ microphone_enabled
+ os
+ os_version
+ recordingClient
+ recording_type
+ recording_version
+ screen_type
+ tab_audio
+ trim_duration
+ width
+ __typename
+ }
+ playable_duration
+ source_duration
+ visibility
+ }
+ }
+ }\n'''),
+ 'GetVideoSource': textwrap.dedent('''\
+ query GetVideoSource($videoId: ID!, $password: String, $acceptableMimes: [CloudfrontVideoAcceptableMime]) {
+ getVideo(id: $videoId, password: $password) {
+ ... on RegularUserVideo {
+ id
+ nullableRawCdnUrl(acceptableMimes: $acceptableMimes, password: $password) {
+ url
+ __typename
+ }
+ __typename
+ }
+ __typename
+ }
+ }\n'''),
+ 'FetchVideoTranscript': textwrap.dedent('''\
+ query FetchVideoTranscript($videoId: ID!, $password: String) {
+ fetchVideoTranscript(videoId: $videoId, password: $password) {
+ ... on VideoTranscriptDetails {
+ id
+ video_id
+ source_url
+ captions_source_url
+ __typename
+ }
+ ... on GenericError {
+ message
+ __typename
+ }
+ __typename
+ }
+ }\n'''),
+ 'FetchChapters': textwrap.dedent('''\
+ query FetchChapters($videoId: ID!, $password: String) {
+ fetchVideoChapters(videoId: $videoId, password: $password) {
+ ... on VideoChapters {
+ video_id
+ content
+ __typename
+ }
+ ... on EmptyChaptersPayload {
+ content
+ __typename
+ }
+ ... on InvalidRequestWarning {
+ message
+ __typename
+ }
+ ... on Error {
+ message
+ __typename
+ }
+ __typename
+ }
+ }\n'''),
+ }
+ _APOLLO_GRAPHQL_VERSION = '0a1856c'
+
+ def _call_graphql_api(self, operations, video_id, note=None, errnote=None):
+ password = self.get_param('videopassword')
+ return self._download_json(
+ 'https://www.loom.com/graphql', video_id, note or 'Downloading GraphQL JSON',
+ errnote or 'Failed to download GraphQL JSON', headers={
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/json',
+ 'x-loom-request-source': f'loom_web_{self._APOLLO_GRAPHQL_VERSION}',
+ 'apollographql-client-name': 'web',
+ 'apollographql-client-version': self._APOLLO_GRAPHQL_VERSION,
+ }, data=json.dumps([{
+ 'operationName': operation_name,
+ 'variables': {
+ 'videoId': video_id,
+ 'password': password,
+ **self._GRAPHQL_VARIABLES.get(operation_name, {}),
+ },
+ 'query': self._GRAPHQL_QUERIES[operation_name],
+ } for operation_name in variadic(operations)], separators=(',', ':')).encode())
+
+ def _call_url_api(self, endpoint, video_id):
+ response = self._download_json(
+ f'https://www.loom.com/api/campaigns/sessions/{video_id}/{endpoint}', video_id,
+ f'Downloading {endpoint} JSON', f'Failed to download {endpoint} JSON', fatal=False,
+ headers={'Accept': 'application/json', 'Content-Type': 'application/json'},
+ data=json.dumps({
+ 'anonID': str(uuid.uuid4()),
+ 'deviceID': None,
+ 'force_original': False, # HTTP error 401 if True
+ 'password': self.get_param('videopassword'),
+ }, separators=(',', ':')).encode())
+ return traverse_obj(response, ('url', {url_or_none}))
+
+ def _extract_formats(self, video_id, metadata, gql_data):
+ formats = []
+ video_properties = traverse_obj(metadata, ('video_properties', {
+ 'width': ('width', {int_or_none}),
+ 'height': ('height', {int_or_none}),
+ 'acodec': ('microphone_enabled', {lambda x: 'none' if x is False else None}),
+ }))
+
+ def get_formats(format_url, format_id, quality):
+ if not format_url:
+ return
+ ext = determine_ext(format_url)
+ query = urllib.parse.urlparse(format_url).query
+
+ if ext == 'm3u8':
+ # Extract pre-merged HLS formats to avoid buggy parsing of metadata in split playlists
+ format_url = format_url.replace('-split.m3u8', '.m3u8')
+ m3u8_formats = self._extract_m3u8_formats(
+ format_url, video_id, 'mp4', m3u8_id=f'hls-{format_id}', fatal=False, quality=quality)
+ for fmt in m3u8_formats:
+ yield {
+ **fmt,
+ 'url': update_url(fmt['url'], query=query),
+ 'extra_param_to_segment_url': query,
+ }
+
+ elif ext == 'mpd':
+ dash_formats = self._extract_mpd_formats(
+ format_url, video_id, mpd_id=f'dash-{format_id}', fatal=False)
+ for fmt in dash_formats:
+ yield {
+ **fmt,
+ 'extra_param_to_segment_url': query,
+ 'quality': quality,
+ }
+
+ else:
+ yield {
+ 'url': format_url,
+ 'ext': ext,
+ 'format_id': f'http-{format_id}',
+ 'quality': quality,
+ **video_properties,
+ }
+
+ raw_url = self._call_url_api('raw-url', video_id)
+ formats.extend(get_formats(raw_url, 'raw', quality=1)) # original quality
+
+ transcoded_url = self._call_url_api('transcoded-url', video_id)
+ formats.extend(get_formats(transcoded_url, 'transcoded', quality=-1)) # transcoded quality
+
+ cdn_url = get_first(gql_data, ('data', 'getVideo', 'nullableRawCdnUrl', 'url', {url_or_none}))
+ # cdn_url is usually a dupe, but the raw-url/transcoded-url endpoints could return errors
+ valid_urls = [update_url(url, query=None) for url in (raw_url, transcoded_url) if url]
+ if cdn_url and update_url(cdn_url, query=None) not in valid_urls:
+ formats.extend(get_formats(cdn_url, 'cdn', quality=0)) # could be original or transcoded
+
+ return formats
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ metadata = get_first(
+ self._call_graphql_api('GetVideoSSR', video_id, 'Downloading GraphQL metadata JSON'),
+ ('data', 'getVideo', {dict})) or {}
+
+ if metadata.get('__typename') == 'VideoPasswordMissingOrIncorrect':
+ if not self.get_param('videopassword'):
+ raise ExtractorError(
+ 'This video is password-protected, use the --video-password option', expected=True)
+ raise ExtractorError('Invalid video password', expected=True)
+
+ gql_data = self._call_graphql_api(['FetchChapters', 'FetchVideoTranscript', 'GetVideoSource'], video_id)
+ duration = traverse_obj(metadata, ('video_properties', 'duration', {int_or_none}))
+
+ return {
+ 'id': video_id,
+ 'duration': duration,
+ 'chapters': self._extract_chapters_from_description(
+ get_first(gql_data, ('data', 'fetchVideoChapters', 'content', {str})), duration) or None,
+ 'formats': self._extract_formats(video_id, metadata, gql_data),
+ 'subtitles': filter_dict({
+ 'en': traverse_obj(gql_data, (
+ ..., 'data', 'fetchVideoTranscript',
+ ('source_url', 'captions_source_url'), {
+ 'url': {url_or_none},
+ })) or None,
+ }),
+ **traverse_obj(metadata, {
+ 'title': ('name', {str}),
+ 'description': ('description', {str}),
+ 'uploader': ('owner', 'display_name', {str}),
+ 'timestamp': ('createdAt', {parse_iso8601}),
+ }),
+ }
+
+
+class LoomFolderIE(InfoExtractor):
+ IE_NAME = 'loom:folder'
+ _VALID_URL = r'https?://(?:www\.)?loom\.com/share/folder/(?P<id>[\da-f]{32})'
+ _TESTS = [{
+ # 2 subfolders, no videos in root
+ 'url': 'https://www.loom.com/share/folder/997db4db046f43e5912f10dc5f817b5c',
+ 'playlist_mincount': 16,
+ 'info_dict': {
+ 'id': '997db4db046f43e5912f10dc5f817b5c',
+ 'title': 'Blending Lessons',
+ },
+ }, {
+ # only videos, no subfolders
+ 'url': 'https://www.loom.com/share/folder/9a8a87f6b6f546d9a400c8e7575ff7f2',
+ 'playlist_mincount': 12,
+ 'info_dict': {
+ 'id': '9a8a87f6b6f546d9a400c8e7575ff7f2',
+ 'title': 'List A- a, i, o',
+ },
+ }, {
+ # videos in root and empty subfolder
+ 'url': 'https://www.loom.com/share/folder/886e534218c24fd292e97e9563078cc4',
+ 'playlist_mincount': 21,
+ 'info_dict': {
+ 'id': '886e534218c24fd292e97e9563078cc4',
+ 'title': 'Medicare Agent Training videos',
+ },
+ }, {
+ # videos in root and videos in subfolders
+ 'url': 'https://www.loom.com/share/folder/b72c4ecdf04745da9403926d80a40c38',
+ 'playlist_mincount': 21,
+ 'info_dict': {
+ 'id': 'b72c4ecdf04745da9403926d80a40c38',
+ 'title': 'Quick Altos Q & A Tutorials',
+ },
+ }, {
+ # recursive folder extraction
+ 'url': 'https://www.loom.com/share/folder/8b458a94e0e4449b8df9ea7a68fafc4e',
+ 'playlist_count': 23,
+ 'info_dict': {
+ 'id': '8b458a94e0e4449b8df9ea7a68fafc4e',
+ 'title': 'Sezer Texting Guide',
+ },
+ }, {
+ # more than 50 videos in 1 folder
+ 'url': 'https://www.loom.com/share/folder/e056a91d290d47ca9b00c9d1df56c463',
+ 'playlist_mincount': 61,
+ 'info_dict': {
+ 'id': 'e056a91d290d47ca9b00c9d1df56c463',
+ 'title': 'User Videos',
+ },
+ }, {
+ # many subfolders
+ 'url': 'https://www.loom.com/share/folder/c2dde8cc67454f0e99031677279d8954',
+ 'playlist_mincount': 75,
+ 'info_dict': {
+ 'id': 'c2dde8cc67454f0e99031677279d8954',
+ 'title': 'Honors 1',
+ },
+ }, {
+ 'url': 'https://www.loom.com/share/folder/bae17109a68146c7803454f2893c8cf8/Edpuzzle',
+ 'only_matching': True,
+ }]
+
+ def _extract_folder_data(self, folder_id):
+ return self._download_json(
+ f'https://www.loom.com/v1/folders/{folder_id}', folder_id,
+ 'Downloading folder info JSON', query={'limit': '10000'})
+
+ def _extract_folder_entries(self, folder_id, initial_folder_data=None):
+ folder_data = initial_folder_data or self._extract_folder_data(folder_id)
+
+ for video in traverse_obj(folder_data, ('videos', lambda _, v: v['id'])):
+ video_id = video['id']
+ yield self.url_result(
+ f'https://www.loom.com/share/{video_id}', LoomIE, video_id, video.get('name'))
+
+ # Recurse into subfolders
+ for subfolder_id in traverse_obj(folder_data, (
+ 'folders', lambda _, v: v['id'] != folder_id, 'id', {str})):
+ yield from self._extract_folder_entries(subfolder_id)
+
+ def _real_extract(self, url):
+ playlist_id = self._match_id(url)
+ playlist_data = self._extract_folder_data(playlist_id)
+
+ return self.playlist_result(
+ self._extract_folder_entries(playlist_id, playlist_data), playlist_id,
+ traverse_obj(playlist_data, ('folder', 'name', {str.strip})))
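A note on the new Loom extractor above: _call_graphql_api() batches several GraphQL operations into a single POST by sending a JSON array of operation objects, each carrying the shared videoId/password variables plus any per-operation extras. A rough sketch of assembling such a payload with the standard library only — the function and parameter names are illustrative, not part of the extractor:

import json

def build_graphql_payload(operations, video_id, queries, extra_variables, password=None):
    # One request body carrying several operations at once, mirroring
    # _call_graphql_api(); compact separators match the extractor's
    # json.dumps(..., separators=(',', ':')).
    return json.dumps([{
        'operationName': op,
        'variables': {
            'videoId': video_id,
            'password': password,
            **extra_variables.get(op, {}),
        },
        'query': queries[op],
    } for op in operations], separators=(',', ':')).encode()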
diff --git a/yt_dlp/extractor/masters.py b/yt_dlp/extractor/masters.py
index 716f1c9..c3c58d7 100644
--- a/yt_dlp/extractor/masters.py
+++ b/yt_dlp/extractor/masters.py
@@ -1,4 +1,3 @@
-from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
traverse_obj,
diff --git a/yt_dlp/extractor/medici.py b/yt_dlp/extractor/medici.py
index 328ccd2..b6235b6 100644
--- a/yt_dlp/extractor/medici.py
+++ b/yt_dlp/extractor/medici.py
@@ -1,67 +1,153 @@
+import urllib.parse
+
from .common import InfoExtractor
from ..utils import (
- unified_strdate,
- update_url_query,
- urlencode_postdata,
+ filter_dict,
+ parse_iso8601,
+ traverse_obj,
+ try_call,
+ url_or_none,
)
class MediciIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?medici\.tv/#!/(?P<id>[^?#&]+)'
- _TEST = {
- 'url': 'http://www.medici.tv/#!/daniel-harding-frans-helmerson-verbier-festival-music-camp',
- 'md5': '004c21bb0a57248085b6ff3fec72719d',
+ _VALID_URL = r'https?://(?:(?P<sub>www|edu)\.)?medici\.tv/[a-z]{2}/[\w.-]+/(?P<id>[^/?#&]+)'
+ _TESTS = [{
+ 'url': 'https://www.medici.tv/en/operas/thomas-ades-the-exterminating-angel-calixto-bieito-opera-bastille-paris',
+ 'md5': 'd483f74e7a7a9eac0dbe152ab189050d',
+ 'info_dict': {
+ 'id': '8032',
+ 'ext': 'mp4',
+ 'title': 'Thomas Adès\'s The Exterminating Angel',
+ 'description': 'md5:708ae6350dadc604225b4a6e32482bab',
+ 'thumbnail': r're:https://.+/.+\.jpg',
+ 'upload_date': '20240304',
+ 'timestamp': 1709561766,
+ 'display_id': 'thomas-ades-the-exterminating-angel-calixto-bieito-opera-bastille-paris',
+ },
+ 'expected_warnings': [r'preview'],
+ }, {
+ 'url': 'https://edu.medici.tv/en/operas/wagner-lohengrin-paris-opera-kirill-serebrennikov-piotr-beczala-kwangchul-youn-johanni-van-oostrum',
+ 'md5': '4ef3f4079a6e1c617584463a9eb84f99',
+ 'info_dict': {
+ 'id': '7900',
+ 'ext': 'mp4',
+ 'title': 'Wagner\'s Lohengrin',
+ 'description': 'md5:a384a62937866101f86902f21752cd89',
+ 'thumbnail': r're:https://.+/.+\.jpg',
+ 'upload_date': '20231017',
+ 'timestamp': 1697554771,
+ 'display_id': 'wagner-lohengrin-paris-opera-kirill-serebrennikov-piotr-beczala-kwangchul-youn-johanni-van-oostrum',
+ },
+ 'expected_warnings': [r'preview'],
+ }, {
+ 'url': 'https://www.medici.tv/en/concerts/sergey-smbatyan-conducts-mansurian-chouchane-siranossian-mario-brunello',
+ 'md5': '9dd757e53b22b2511e85ea9ea60e4815',
+ 'info_dict': {
+ 'id': '5712',
+ 'ext': 'mp4',
+ 'title': 'Sergey Smbatyan conducts Tigran Mansurian — With Chouchane Siranossian and Mario Brunello',
+ 'thumbnail': r're:https://.+/.+\.jpg',
+ 'description': 'md5:9411fe44c874bb10e9af288c65816e41',
+ 'upload_date': '20200323',
+ 'timestamp': 1584975600,
+ 'display_id': 'sergey-smbatyan-conducts-mansurian-chouchane-siranossian-mario-brunello',
+ },
+ 'expected_warnings': [r'preview'],
+ }, {
+ 'url': 'https://www.medici.tv/en/ballets/carmen-ballet-choregraphie-de-jiri-bubenicek-teatro-dellopera-di-roma',
+ 'md5': '40f5e76cb701a97a6d7ba23b62c49990',
+ 'info_dict': {
+ 'id': '7857',
+ 'ext': 'mp4',
+ 'title': 'Carmen by Jiří Bubeníček after Roland Petit, music by Bizet, de Falla, Castelnuovo-Tedesco, and Bonolis',
+ 'thumbnail': r're:https://.+/.+\.jpg',
+ 'description': 'md5:0f15a15611ed748020c769873e10a8bb',
+ 'upload_date': '20240223',
+ 'timestamp': 1708707600,
+ 'display_id': 'carmen-ballet-choregraphie-de-jiri-bubenicek-teatro-dellopera-di-roma',
+ },
+ 'expected_warnings': [r'preview'],
+ }, {
+ 'url': 'https://www.medici.tv/en/documentaries/la-sonnambula-liege-2023-documentaire',
+ 'md5': '87ff198018ce79a34757ab0dd6f21080',
+ 'info_dict': {
+ 'id': '7513',
+ 'ext': 'mp4',
+ 'title': 'La Sonnambula',
+ 'thumbnail': r're:https://.+/.+\.jpg',
+ 'description': 'md5:0caf9109a860fd50cd018df062a67f34',
+ 'upload_date': '20231103',
+ 'timestamp': 1699010830,
+ 'display_id': 'la-sonnambula-liege-2023-documentaire',
+ },
+ 'expected_warnings': [r'preview'],
+ }, {
+ 'url': 'https://edu.medici.tv/en/masterclasses/yvonne-loriod-olivier-messiaen',
+ 'md5': 'fb5dcec46d76ad20fbdbaabb01da191d',
+ 'info_dict': {
+ 'id': '3024',
+ 'ext': 'mp4',
+ 'title': 'Olivier Messiaen and Yvonne Loriod, pianists and teachers',
+ 'thumbnail': r're:https://.+/.+\.jpg',
+ 'description': 'md5:aab948e2f7690214b5c28896c83f1fc1',
+ 'upload_date': '20150223',
+ 'timestamp': 1424706608,
+ 'display_id': 'yvonne-loriod-olivier-messiaen',
+ },
+ 'skip': 'Requires authentication; preview starts in the middle',
+ }, {
+ 'url': 'https://www.medici.tv/en/jazz/makaya-mccraven-la-rochelle',
+ 'md5': '4cc279a8b06609782747c8f50beea2b3',
'info_dict': {
- 'id': '3059',
- 'ext': 'flv',
- 'title': 'Daniel Harding conducts the Verbier Festival Music Camp \u2013 With Frans Helmerson',
- 'description': 'md5:322a1e952bafb725174fd8c1a8212f58',
- 'thumbnail': r're:^https?://.*\.jpg$',
- 'upload_date': '20170408',
+ 'id': '7922',
+ 'ext': 'mp4',
+ 'title': 'NEW: Makaya McCraven in La Rochelle',
+ 'thumbnail': r're:https://.+/.+\.jpg',
+ 'description': 'md5:b5a8aaeb6993d8ccb18bde8abb8aa8d2',
+ 'upload_date': '20231228',
+ 'timestamp': 1703754863,
+ 'display_id': 'makaya-mccraven-la-rochelle',
},
- }
+ 'expected_warnings': [r'preview'],
+ }]
def _real_extract(self, url):
- video_id = self._match_id(url)
-
- # Sets csrftoken cookie
- self._download_webpage(url, video_id)
+ display_id, subdomain = self._match_valid_url(url).group('id', 'sub')
+ self._request_webpage(url, display_id, 'Requesting CSRF token cookie')
- MEDICI_URL = 'http://www.medici.tv/'
+ subdomain = 'edu-' if subdomain == 'edu' else ''
+ origin = f'https://{urllib.parse.urlparse(url).hostname}'
data = self._download_json(
- MEDICI_URL, video_id,
- data=urlencode_postdata({
- 'json': 'true',
- 'page': '/%s' % video_id,
- 'timezone_offset': -420,
- }), headers={
- 'X-CSRFToken': self._get_cookies(url)['csrftoken'].value,
- 'X-Requested-With': 'XMLHttpRequest',
- 'Referer': MEDICI_URL,
- 'Content-Type': 'application/x-www-form-urlencoded',
- })
-
- video = data['video']['videos']['video1']
-
- title = video.get('nom') or data['title']
+ f'https://api.medici.tv/{subdomain}satie/edito/movie-file/{display_id}/', display_id,
+ headers=filter_dict({
+ 'Authorization': try_call(
+ lambda: urllib.parse.unquote(self._get_cookies(url)['auth._token.mAuth'].value)),
+ 'Device-Type': 'web',
+ 'Origin': origin,
+ 'Referer': f'{origin}/',
+ 'Accept': 'application/json, text/plain, */*',
+ }))
- video_id = video.get('id') or video_id
- formats = self._extract_f4m_formats(
- update_url_query(video['url_akamai'], {
- 'hdcore': '3.1.0',
- 'plugin=aasp': '3.1.0.43.124',
- }), video_id, f4m_id='hds')
+ if not traverse_obj(data, ('video', 'is_full_video')) and traverse_obj(
+ data, ('video', 'is_limited_by_user_access')):
+ self.report_warning(
+ 'The full video is for subscribers only. Only previews will be downloaded. If you '
+ 'have used the --cookies-from-browser option, try using the --cookies option instead')
- description = data.get('meta_description')
- thumbnail = video.get('url_thumbnail') or data.get('main_image')
- upload_date = unified_strdate(data['video'].get('date'))
+ formats, subtitles = self._extract_m3u8_formats_and_subtitles(
+ data['video']['video_url'], display_id, 'mp4')
return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'upload_date': upload_date,
+ 'id': str(data['id']),
+ 'display_id': display_id,
'formats': formats,
+ 'subtitles': subtitles,
+ **traverse_obj(data, {
+ 'title': ('title', {str}),
+ 'description': ('subtitle', {str}),
+ 'thumbnail': ('picture', {url_or_none}),
+ 'timestamp': ('date_publish', {parse_iso8601}),
+ }),
}
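The rewritten medici.tv extractor above authenticates API calls by replaying the site's URL-encoded auth cookie as the Authorization header and deriving Origin/Referer from the requested page's hostname. A small stdlib-only sketch of that header construction, assuming the caller has already read the 'auth._token.mAuth' cookie value; the helper name is illustrative:

import urllib.parse

def build_medici_headers(page_url, auth_cookie_value=None):
    # Origin/Referer come from the page's hostname; the cookie value is
    # URL-decoded before being reused as the Authorization header.
    origin = f'https://{urllib.parse.urlparse(page_url).hostname}'
    headers = {
        'Device-Type': 'web',
        'Origin': origin,
        'Referer': f'{origin}/',
        'Accept': 'application/json, text/plain, */*',
    }
    if auth_cookie_value:
        headers['Authorization'] = urllib.parse.unquote(auth_cookie_value)
    return headers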
diff --git a/yt_dlp/extractor/microsoftstream.py b/yt_dlp/extractor/microsoftstream.py
index 9b50996..5f5f160 100644
--- a/yt_dlp/extractor/microsoftstream.py
+++ b/yt_dlp/extractor/microsoftstream.py
@@ -1,4 +1,4 @@
-from base64 import b64decode
+import base64
from .common import InfoExtractor
from ..utils import (
@@ -81,7 +81,7 @@ class MicrosoftStreamIE(InfoExtractor):
'url': thumbnail_url,
}
thumb_name = url_basename(thumbnail_url)
- thumb_name = str(b64decode(thumb_name + '=' * (-len(thumb_name) % 4)))
+ thumb_name = str(base64.b64decode(thumb_name + '=' * (-len(thumb_name) % 4)))
thumb.update(parse_resolution(thumb_name))
thumbnails.append(thumb)
diff --git a/yt_dlp/extractor/mixch.py b/yt_dlp/extractor/mixch.py
index 4be6947..b980fd0 100644
--- a/yt_dlp/extractor/mixch.py
+++ b/yt_dlp/extractor/mixch.py
@@ -1,5 +1,7 @@
from .common import InfoExtractor
-from ..utils import UserNotLive, traverse_obj
+from ..networking.exceptions import HTTPError
+from ..utils import ExtractorError, UserNotLive, int_or_none, url_or_none
+from ..utils.traversal import traverse_obj
class MixchIE(InfoExtractor):
@@ -25,25 +27,23 @@ class MixchIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
- webpage = self._download_webpage(f'https://mixch.tv/u/{video_id}/live', video_id)
-
- initial_js_state = self._parse_json(self._search_regex(
- r'(?m)^\s*window\.__INITIAL_JS_STATE__\s*=\s*(\{.+?\});\s*$', webpage, 'initial JS state'), video_id)
- if not initial_js_state.get('liveInfo'):
+ data = self._download_json(f'https://mixch.tv/api-web/users/{video_id}/live', video_id)
+ if not traverse_obj(data, ('liveInfo', {dict})):
raise UserNotLive(video_id=video_id)
return {
'id': video_id,
- 'title': traverse_obj(initial_js_state, ('liveInfo', 'title')),
- 'comment_count': traverse_obj(initial_js_state, ('liveInfo', 'comments')),
- 'view_count': traverse_obj(initial_js_state, ('liveInfo', 'visitor')),
- 'timestamp': traverse_obj(initial_js_state, ('liveInfo', 'created')),
- 'uploader': traverse_obj(initial_js_state, ('broadcasterInfo', 'name')),
'uploader_id': video_id,
+ **traverse_obj(data, {
+ 'title': ('liveInfo', 'title', {str}),
+ 'comment_count': ('liveInfo', 'comments', {int_or_none}),
+ 'view_count': ('liveInfo', 'visitor', {int_or_none}),
+ 'timestamp': ('liveInfo', 'created', {int_or_none}),
+ 'uploader': ('broadcasterInfo', 'name', {str}),
+ }),
'formats': [{
'format_id': 'hls',
- 'url': (traverse_obj(initial_js_state, ('liveInfo', 'hls'))
- or f'https://d1hd0ww6piyb43.cloudfront.net/hls/torte_{video_id}.m3u8'),
+ 'url': data['liveInfo']['hls'],
'ext': 'mp4',
'protocol': 'm3u8',
}],
@@ -60,22 +60,38 @@ class MixchArchiveIE(InfoExtractor):
'skip': 'paid video, no DRM. expires at Jan 23',
'info_dict': {
'id': '421',
+ 'ext': 'mp4',
'title': '96NEKO SHOW TIME',
}
+ }, {
+ 'url': 'https://mixch.tv/archive/1213',
+ 'skip': 'paid video, no DRM. expires at Dec 31, 2023',
+ 'info_dict': {
+ 'id': '1213',
+ 'ext': 'mp4',
+ 'title': '【特別トーク番組アーカイブス】Merm4id×燐舞曲 2nd LIVE「VERSUS」',
+ 'release_date': '20231201',
+ 'thumbnail': str,
+ }
+ }, {
+ 'url': 'https://mixch.tv/archive/1214',
+ 'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
- html5_videos = self._parse_html5_media_entries(
- url, webpage.replace('video-js', 'video'), video_id, 'hls')
- if not html5_videos:
- self.raise_login_required(method='cookies')
- infodict = html5_videos[0]
- infodict.update({
- 'id': video_id,
- 'title': self._html_search_regex(r'class="archive-title">(.+?)</', webpage, 'title')
- })
+ try:
+ info_json = self._download_json(
+ f'https://mixch.tv/api-web/archive/{video_id}', video_id)['archive']
+ except ExtractorError as e:
+ if isinstance(e.cause, HTTPError) and e.cause.status == 401:
+ self.raise_login_required()
+ raise
- return infodict
+ return {
+ 'id': video_id,
+ 'title': traverse_obj(info_json, ('title', {str})),
+ 'formats': self._extract_m3u8_formats(info_json['archiveURL'], video_id),
+ 'thumbnail': traverse_obj(info_json, ('thumbnailURL', {url_or_none})),
+ }
diff --git a/yt_dlp/extractor/motherless.py b/yt_dlp/extractor/motherless.py
index 160150a..b6c18fe 100644
--- a/yt_dlp/extractor/motherless.py
+++ b/yt_dlp/extractor/motherless.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
import re
import urllib.parse
@@ -151,7 +151,7 @@ class MotherlessIE(InfoExtractor):
'd': 'days',
}
kwargs = {_AGO_UNITS.get(uploaded_ago[-1]): delta}
- upload_date = (datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(**kwargs)).strftime('%Y%m%d')
+ upload_date = (dt.datetime.now(dt.timezone.utc) - dt.timedelta(**kwargs)).strftime('%Y%m%d')
comment_count = len(re.findall(r'''class\s*=\s*['"]media-comment-contents\b''', webpage))
uploader_id = self._html_search_regex(
diff --git a/yt_dlp/extractor/naver.py b/yt_dlp/extractor/naver.py
index 806b790..885557e 100644
--- a/yt_dlp/extractor/naver.py
+++ b/yt_dlp/extractor/naver.py
@@ -4,8 +4,8 @@ import hmac
import itertools
import json
import re
+import urllib.parse
import time
-from urllib.parse import parse_qs, urlparse
from .common import InfoExtractor
from ..utils import (
@@ -388,7 +388,7 @@ class NaverNowIE(NaverBaseIE):
def _real_extract(self, url):
show_id = self._match_id(url)
- qs = parse_qs(urlparse(url).query)
+ qs = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
if not self._yes_playlist(show_id, qs.get('shareHightlight')):
return self._extract_highlight(show_id, qs['shareHightlight'][0])
diff --git a/yt_dlp/extractor/neteasemusic.py b/yt_dlp/extractor/neteasemusic.py
index d332b84..73b33a9 100644
--- a/yt_dlp/extractor/neteasemusic.py
+++ b/yt_dlp/extractor/neteasemusic.py
@@ -1,9 +1,9 @@
+import hashlib
import itertools
import json
+import random
import re
import time
-from hashlib import md5
-from random import randint
from .common import InfoExtractor
from ..aes import aes_ecb_encrypt, pkcs7_padding
@@ -34,7 +34,7 @@ class NetEaseMusicBaseIE(InfoExtractor):
request_text = json.dumps({**query_body, 'header': cookies}, separators=(',', ':'))
message = f'nobody{api_path}use{request_text}md5forencrypt'.encode('latin1')
- msg_digest = md5(message).hexdigest()
+ msg_digest = hashlib.md5(message).hexdigest()
data = pkcs7_padding(list(str.encode(
f'{api_path}-36cd479b6b5-{request_text}-36cd479b6b5-{msg_digest}')))
@@ -53,7 +53,7 @@ class NetEaseMusicBaseIE(InfoExtractor):
'__csrf': '',
'os': 'pc',
'channel': 'undefined',
- 'requestId': f'{int(time.time() * 1000)}_{randint(0, 1000):04}',
+ 'requestId': f'{int(time.time() * 1000)}_{random.randint(0, 1000):04}',
**traverse_obj(self._get_cookies(self._API_BASE), {
'MUSIC_U': ('MUSIC_U', {lambda i: i.value}),
})
diff --git a/yt_dlp/extractor/nhk.py b/yt_dlp/extractor/nhk.py
index 7cf5b24..8bb017a 100644
--- a/yt_dlp/extractor/nhk.py
+++ b/yt_dlp/extractor/nhk.py
@@ -8,6 +8,7 @@ from ..utils import (
int_or_none,
join_nonempty,
parse_duration,
+ remove_end,
traverse_obj,
try_call,
unescapeHTML,
@@ -19,8 +20,7 @@ from ..utils import (
class NhkBaseIE(InfoExtractor):
_API_URL_TEMPLATE = 'https://nwapi.nhk.jp/nhkworld/%sod%slist/v7b/%s/%s/%s/all%s.json'
- _BASE_URL_REGEX = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand'
- _TYPE_REGEX = r'/(?P<type>video|audio)/'
+ _BASE_URL_REGEX = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P<lang>[a-z]{2})/'
def _call_api(self, m_id, lang, is_video, is_episode, is_clip):
return self._download_json(
@@ -83,7 +83,7 @@ class NhkBaseIE(InfoExtractor):
def _extract_episode_info(self, url, episode=None):
fetch_episode = episode is None
lang, m_type, episode_id = NhkVodIE._match_valid_url(url).group('lang', 'type', 'id')
- is_video = m_type == 'video'
+ is_video = m_type != 'audio'
if is_video:
episode_id = episode_id[:4] + '-' + episode_id[4:]
@@ -138,9 +138,10 @@ class NhkBaseIE(InfoExtractor):
else:
if fetch_episode:
- audio_path = episode['audio']['audio']
+ # From https://www3.nhk.or.jp/nhkworld/common/player/radio/inline/rod.html
+ audio_path = remove_end(episode['audio']['audio'], '.m4a')
info['formats'] = self._extract_m3u8_formats(
- 'https://nhkworld-vh.akamaihd.net/i%s/master.m3u8' % audio_path,
+ f'{urljoin("https://vod-stream.nhk.jp", audio_path)}/index.m3u8',
episode_id, 'm4a', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False)
for f in info['formats']:
@@ -155,9 +156,11 @@ class NhkBaseIE(InfoExtractor):
class NhkVodIE(NhkBaseIE):
- # the 7-character IDs can have alphabetic chars too: assume [a-z] rather than just [a-f], eg
- _VALID_URL = [rf'{NhkBaseIE._BASE_URL_REGEX}/(?P<type>video)/(?P<id>[0-9a-z]+)',
- rf'{NhkBaseIE._BASE_URL_REGEX}/(?P<type>audio)/(?P<id>[^/?#]+?-\d{{8}}-[0-9a-z]+)']
+ _VALID_URL = [
+ rf'{NhkBaseIE._BASE_URL_REGEX}shows/(?:(?P<type>video)/)?(?P<id>\d{{4}}[\da-z]\d+)/?(?:$|[?#])',
+ rf'{NhkBaseIE._BASE_URL_REGEX}(?:ondemand|shows)/(?P<type>audio)/(?P<id>[^/?#]+?-\d{{8}}-[\da-z]+)',
+ rf'{NhkBaseIE._BASE_URL_REGEX}ondemand/(?P<type>video)/(?P<id>\d{{4}}[\da-z]\d+)', # deprecated
+ ]
# Content available only for a limited period of time. Visit
# https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
_TESTS = [{
@@ -167,17 +170,16 @@ class NhkVodIE(NhkBaseIE):
'ext': 'mp4',
'title': 'Japan Railway Journal - The Tohoku Shinkansen: Full Speed Ahead',
'description': 'md5:49f7c5b206e03868a2fdf0d0814b92f6',
- 'thumbnail': 'md5:51bcef4a21936e7fea1ff4e06353f463',
+ 'thumbnail': r're:https://.+/.+\.jpg',
'episode': 'The Tohoku Shinkansen: Full Speed Ahead',
'series': 'Japan Railway Journal',
- 'modified_timestamp': 1694243656,
+ 'modified_timestamp': 1707217907,
'timestamp': 1681428600,
'release_timestamp': 1693883728,
'duration': 1679,
'upload_date': '20230413',
- 'modified_date': '20230909',
+ 'modified_date': '20240206',
'release_date': '20230905',
-
},
}, {
# video clip
@@ -188,15 +190,15 @@ class NhkVodIE(NhkBaseIE):
'ext': 'mp4',
'title': 'Dining with the Chef - Chef Saito\'s Family recipe: MENCHI-KATSU',
'description': 'md5:5aee4a9f9d81c26281862382103b0ea5',
- 'thumbnail': 'md5:d6a4d9b6e9be90aaadda0bcce89631ed',
+ 'thumbnail': r're:https://.+/.+\.jpg',
'series': 'Dining with the Chef',
'episode': 'Chef Saito\'s Family recipe: MENCHI-KATSU',
'duration': 148,
'upload_date': '20190816',
'release_date': '20230902',
'release_timestamp': 1693619292,
- 'modified_timestamp': 1694168033,
- 'modified_date': '20230908',
+ 'modified_timestamp': 1707217907,
+ 'modified_date': '20240206',
'timestamp': 1565997540,
},
}, {
@@ -208,7 +210,7 @@ class NhkVodIE(NhkBaseIE):
'title': 'Living in Japan - Tips for Travelers to Japan / Ramen Vending Machines',
'series': 'Living in Japan',
'description': 'md5:0a0e2077d8f07a03071e990a6f51bfab',
- 'thumbnail': 'md5:960622fb6e06054a4a1a0c97ea752545',
+ 'thumbnail': r're:https://.+/.+\.jpg',
'episode': 'Tips for Travelers to Japan / Ramen Vending Machines'
},
}, {
@@ -245,7 +247,7 @@ class NhkVodIE(NhkBaseIE):
'title': 'おはよう日本(7時台) - 10月8日放送',
'series': 'おはよう日本(7時台)',
'episode': '10月8日放送',
- 'thumbnail': 'md5:d733b1c8e965ab68fb02b2d347d0e9b4',
+ 'thumbnail': r're:https://.+/.+\.jpg',
'description': 'md5:9c1d6cbeadb827b955b20e99ab920ff0',
},
'skip': 'expires 2023-10-15',
@@ -255,17 +257,100 @@ class NhkVodIE(NhkBaseIE):
'info_dict': {
'id': 'nw_vod_v_en_3004_952_20230723091000_01_1690074552',
'ext': 'mp4',
- 'title': 'Barakan Discovers AMAMI OSHIMA: Isson\'s Treasure Island',
+ 'title': 'Barakan Discovers - AMAMI OSHIMA: Isson\'s Treasure Isla',
'description': 'md5:5db620c46a0698451cc59add8816b797',
- 'thumbnail': 'md5:67d9ff28009ba379bfa85ad1aaa0e2bd',
+ 'thumbnail': r're:https://.+/.+\.jpg',
'release_date': '20230905',
'timestamp': 1690103400,
'duration': 2939,
'release_timestamp': 1693898699,
- 'modified_timestamp': 1698057495,
- 'modified_date': '20231023',
'upload_date': '20230723',
+ 'modified_timestamp': 1707217907,
+ 'modified_date': '20240206',
+ 'episode': 'AMAMI OSHIMA: Isson\'s Treasure Isla',
+ 'series': 'Barakan Discovers',
+ },
+ }, {
+ # /ondemand/video/ url with alphabetical character in 5th position of id
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a07/',
+ 'info_dict': {
+ 'id': 'nw_c_en_9999-a07',
+ 'ext': 'mp4',
+ 'episode': 'Mini-Dramas on SDGs: Ep 1 Close the Gender Gap [Director\'s Cut]',
+ 'series': 'Mini-Dramas on SDGs',
+ 'modified_date': '20240206',
+ 'title': 'Mini-Dramas on SDGs - Mini-Dramas on SDGs: Ep 1 Close the Gender Gap [Director\'s Cut]',
+ 'description': 'md5:3f9dcb4db22fceb675d90448a040d3f6',
+ 'timestamp': 1621962360,
+ 'duration': 189,
+ 'release_date': '20230903',
+ 'modified_timestamp': 1707217907,
+ 'upload_date': '20210525',
+ 'thumbnail': r're:https://.+/.+\.jpg',
+ 'release_timestamp': 1693713487,
+ },
+ }, {
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999d17/',
+ 'info_dict': {
+ 'id': 'nw_c_en_9999-d17',
+ 'ext': 'mp4',
+ 'title': 'Flowers of snow blossom - The 72 Pentads of Yamato',
+ 'description': 'Today’s focus: Snow',
+ 'release_timestamp': 1693792402,
+ 'release_date': '20230904',
+ 'upload_date': '20220128',
+ 'timestamp': 1643370960,
+ 'thumbnail': r're:https://.+/.+\.jpg',
+ 'duration': 136,
+ 'series': '',
+ 'modified_date': '20240206',
+ 'modified_timestamp': 1707217907,
+ },
+ }, {
+ # new /shows/ url format
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/2032307/',
+ 'info_dict': {
+ 'id': 'nw_vod_v_en_2032_307_20240321113000_01_1710990282',
+ 'ext': 'mp4',
+ 'title': 'Japanology Plus - 20th Anniversary Special Part 1',
+ 'description': 'md5:817d41fc8e54339ad2a916161ea24faf',
+ 'episode': '20th Anniversary Special Part 1',
+ 'series': 'Japanology Plus',
+ 'thumbnail': r're:https://.+/.+\.jpg',
+ 'duration': 1680,
+ 'timestamp': 1711020600,
+ 'upload_date': '20240321',
+ 'release_timestamp': 1711022683,
+ 'release_date': '20240321',
+ 'modified_timestamp': 1711031012,
+ 'modified_date': '20240321',
+ },
+ }, {
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/3020025/',
+ 'info_dict': {
+ 'id': 'nw_vod_v_en_3020_025_20230325144000_01_1679723944',
+ 'ext': 'mp4',
+ 'title': '100 Ideas to Save the World - Working Styles Evolve',
+ 'description': 'md5:9e6c7778eaaf4f7b4af83569649f84d9',
+ 'episode': 'Working Styles Evolve',
+ 'series': '100 Ideas to Save the World',
+ 'thumbnail': r're:https://.+/.+\.jpg',
+ 'duration': 899,
+ 'upload_date': '20230325',
+ 'timestamp': 1679755200,
+ 'release_date': '20230905',
+ 'release_timestamp': 1693880540,
+ 'modified_date': '20240206',
+ 'modified_timestamp': 1707217907,
},
+ }, {
+ # new /shows/audio/ url format
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/audio/livinginjapan-20231001-1/',
+ 'only_matching': True,
+ }, {
+ # valid url even if can't be found in wild; support needed for clip entries extraction
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/9999o80/',
+ 'only_matching': True,
}]
def _real_extract(self, url):
@@ -273,18 +358,21 @@ class NhkVodIE(NhkBaseIE):
class NhkVodProgramIE(NhkBaseIE):
- _VALID_URL = rf'{NhkBaseIE._BASE_URL_REGEX}/program{NhkBaseIE._TYPE_REGEX}(?P<id>\w+)(?:.+?\btype=(?P<episode_type>clip|(?:radio|tv)Episode))?'
+ _VALID_URL = rf'''(?x)
+ {NhkBaseIE._BASE_URL_REGEX}(?:shows|tv)/
+ (?:(?P<type>audio)/programs/)?(?P<id>\w+)/?
+ (?:\?(?:[^#]+&)?type=(?P<episode_type>clip|(?:radio|tv)Episode))?'''
_TESTS = [{
# video program episodes
- 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/sumo',
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/sumo/',
'info_dict': {
'id': 'sumo',
'title': 'GRAND SUMO Highlights',
'description': 'md5:fc20d02dc6ce85e4b72e0273aa52fdbf',
},
- 'playlist_mincount': 0,
+ 'playlist_mincount': 1,
}, {
- 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway',
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/japanrailway/',
'info_dict': {
'id': 'japanrailway',
'title': 'Japan Railway Journal',
@@ -293,40 +381,68 @@ class NhkVodProgramIE(NhkBaseIE):
'playlist_mincount': 12,
}, {
# video program clips
- 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway/?type=clip',
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/japanrailway/?type=clip',
'info_dict': {
'id': 'japanrailway',
'title': 'Japan Railway Journal',
'description': 'md5:ea39d93af7d05835baadf10d1aae0e3f',
},
- 'playlist_mincount': 5,
- }, {
- 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/10yearshayaomiyazaki/',
- 'only_matching': True,
+ 'playlist_mincount': 12,
}, {
# audio program
- 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/audio/listener/',
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/audio/programs/livinginjapan/',
+ 'info_dict': {
+ 'id': 'livinginjapan',
+ 'title': 'Living in Japan',
+ 'description': 'md5:665bb36ec2a12c5a7f598ee713fc2b54',
+ },
+ 'playlist_mincount': 12,
+ }, {
+ # /tv/ program url
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/tv/designtalksplus/',
+ 'info_dict': {
+ 'id': 'designtalksplus',
+ 'title': 'DESIGN TALKS plus',
+ 'description': 'md5:47b3b3a9f10d4ac7b33b53b70a7d2837',
+ },
+ 'playlist_mincount': 20,
+ }, {
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/10yearshayaomiyazaki/',
'only_matching': True,
}]
+ @classmethod
+ def suitable(cls, url):
+ return False if NhkVodIE.suitable(url) else super().suitable(url)
+
+ def _extract_meta_from_class_elements(self, class_values, html):
+ for class_value in class_values:
+ if value := clean_html(get_element_by_class(class_value, html)):
+ return value
+
def _real_extract(self, url):
lang, m_type, program_id, episode_type = self._match_valid_url(url).group('lang', 'type', 'id', 'episode_type')
episodes = self._call_api(
- program_id, lang, m_type == 'video', False, episode_type == 'clip')
+ program_id, lang, m_type != 'audio', False, episode_type == 'clip')
- entries = []
- for episode in episodes:
- episode_path = episode.get('url')
- if not episode_path:
- continue
- entries.append(self._extract_episode_info(
- urljoin(url, episode_path), episode))
+ def entries():
+ for episode in episodes:
+ if episode_path := episode.get('url'):
+ yield self._extract_episode_info(urljoin(url, episode_path), episode)
html = self._download_webpage(url, program_id)
- program_title = clean_html(get_element_by_class('p-programDetail__title', html))
- program_description = clean_html(get_element_by_class('p-programDetail__text', html))
-
- return self.playlist_result(entries, program_id, program_title, program_description)
+ program_title = self._extract_meta_from_class_elements([
+ 'p-programDetail__title', # /ondemand/program/
+ 'pProgramHero__logoText', # /shows/
+ 'tAudioProgramMain__title', # /shows/audio/programs/
+ 'p-program-name'], html) # /tv/
+ program_description = self._extract_meta_from_class_elements([
+ 'p-programDetail__text', # /ondemand/program/
+ 'pProgramHero__description', # /shows/
+ 'tAudioProgramMain__info', # /shows/audio/programs/
+ 'p-program-description'], html) # /tv/
+
+ return self.playlist_result(entries(), program_id, program_title, program_description)
class NhkForSchoolBangumiIE(InfoExtractor):
diff --git a/yt_dlp/extractor/niconico.py b/yt_dlp/extractor/niconico.py
index 6a46246..b04ce96 100644
--- a/yt_dlp/extractor/niconico.py
+++ b/yt_dlp/extractor/niconico.py
@@ -1,11 +1,10 @@
-import datetime
+import datetime as dt
import functools
import itertools
import json
import re
import time
-
-from urllib.parse import urlparse
+import urllib.parse
from .common import InfoExtractor, SearchInfoExtractor
from ..networking import Request
@@ -820,12 +819,12 @@ class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor):
'playlist_mincount': 1610,
}]
- _START_DATE = datetime.date(2007, 1, 1)
+ _START_DATE = dt.date(2007, 1, 1)
_RESULTS_PER_PAGE = 32
_MAX_PAGES = 50
def _entries(self, url, item_id, start_date=None, end_date=None):
- start_date, end_date = start_date or self._START_DATE, end_date or datetime.datetime.now().date()
+ start_date, end_date = start_date or self._START_DATE, end_date or dt.datetime.now().date()
# If the last page has a full page of videos, we need to break down the query interval further
last_page_len = len(list(self._get_entries_for_date(
@@ -957,7 +956,7 @@ class NiconicoLiveIE(InfoExtractor):
'frontend_id': traverse_obj(embedded_data, ('site', 'frontendId')) or '9',
})
- hostname = remove_start(urlparse(urlh.url).hostname, 'sp.')
+ hostname = remove_start(urllib.parse.urlparse(urlh.url).hostname, 'sp.')
latency = try_get(self._configuration_arg('latency'), lambda x: x[0])
if latency not in self._KNOWN_LATENCY:
latency = 'high'
diff --git a/yt_dlp/extractor/panopto.py b/yt_dlp/extractor/panopto.py
index ddea32d..63c5fd6 100644
--- a/yt_dlp/extractor/panopto.py
+++ b/yt_dlp/extractor/panopto.py
@@ -1,8 +1,8 @@
import calendar
-import json
+import datetime as dt
import functools
-from datetime import datetime, timezone
-from random import random
+import json
+import random
from .common import InfoExtractor
from ..compat import (
@@ -243,7 +243,7 @@ class PanoptoIE(PanoptoBaseIE):
invocation_id = delivery_info.get('InvocationId')
stream_id = traverse_obj(delivery_info, ('Delivery', 'Streams', ..., 'PublicID'), get_all=False, expected_type=str)
if invocation_id and stream_id and duration:
- timestamp_str = f'/Date({calendar.timegm(datetime.now(timezone.utc).timetuple())}000)/'
+ timestamp_str = f'/Date({calendar.timegm(dt.datetime.now(dt.timezone.utc).timetuple())}000)/'
data = {
'streamRequests': [
{
@@ -415,7 +415,7 @@ class PanoptoIE(PanoptoBaseIE):
'cast': traverse_obj(delivery, ('Contributors', ..., 'DisplayName'), expected_type=lambda x: x or None),
'timestamp': session_start_time - 11640000000 if session_start_time else None,
'duration': delivery.get('Duration'),
- 'thumbnail': base_url + f'/Services/FrameGrabber.svc/FrameRedirect?objectId={video_id}&mode=Delivery&random={random()}',
+ 'thumbnail': base_url + f'/Services/FrameGrabber.svc/FrameRedirect?objectId={video_id}&mode=Delivery&random={random.random()}',
'average_rating': delivery.get('AverageRating'),
'chapters': self._extract_chapters(timestamps),
'uploader': delivery.get('OwnerDisplayName') or None,
diff --git a/yt_dlp/extractor/patreon.py b/yt_dlp/extractor/patreon.py
index d2ddb72..d4f822f 100644
--- a/yt_dlp/extractor/patreon.py
+++ b/yt_dlp/extractor/patreon.py
@@ -92,7 +92,7 @@ class PatreonIE(PatreonBaseIE):
'thumbnail': 're:^https?://.*$',
'upload_date': '20150211',
'description': 'md5:8af6425f50bd46fbf29f3db0fc3a8364',
- 'uploader_id': 'TraciJHines',
+ 'uploader_id': '@TraciHinesMusic',
'categories': ['Entertainment'],
'duration': 282,
'view_count': int,
@@ -106,8 +106,10 @@ class PatreonIE(PatreonBaseIE):
'availability': 'public',
'channel_follower_count': int,
'playable_in_embed': True,
- 'uploader_url': 'http://www.youtube.com/user/TraciJHines',
+ 'uploader_url': 'https://www.youtube.com/@TraciHinesMusic',
'comment_count': int,
+ 'channel_is_verified': True,
+ 'chapters': 'count:4',
},
'params': {
'noplaylist': True,
@@ -176,6 +178,27 @@ class PatreonIE(PatreonBaseIE):
'uploader_url': 'https://www.patreon.com/thenormies',
},
'skip': 'Patron-only content',
+ }, {
+ # dead vimeo and embed URLs, need to extract post_file
+ 'url': 'https://www.patreon.com/posts/hunter-x-hunter-34007913',
+ 'info_dict': {
+ 'id': '34007913',
+ 'ext': 'mp4',
+ 'title': 'Hunter x Hunter | Kurapika DESTROYS Uvogin!!!',
+ 'like_count': int,
+ 'uploader': 'YaBoyRoshi',
+ 'timestamp': 1581636833,
+ 'channel_url': 'https://www.patreon.com/yaboyroshi',
+ 'thumbnail': r're:^https?://.*$',
+ 'tags': ['Hunter x Hunter'],
+ 'uploader_id': '14264111',
+ 'comment_count': int,
+ 'channel_follower_count': int,
+ 'description': 'Kurapika is a walking cheat code!',
+ 'upload_date': '20200213',
+ 'channel_id': '2147162',
+ 'uploader_url': 'https://www.patreon.com/yaboyroshi',
+ },
}]
def _real_extract(self, url):
@@ -250,20 +273,13 @@ class PatreonIE(PatreonBaseIE):
v_url = url_or_none(compat_urllib_parse_unquote(
self._search_regex(r'(https(?:%3A%2F%2F|://)player\.vimeo\.com.+app_id(?:=|%3D)+\d+)', embed_html, 'vimeo url', fatal=False)))
if v_url:
- return {
- **info,
- '_type': 'url_transparent',
- 'url': VimeoIE._smuggle_referrer(v_url, 'https://patreon.com'),
- 'ie_key': 'Vimeo',
- }
+ v_url = VimeoIE._smuggle_referrer(v_url, 'https://patreon.com')
+ if self._request_webpage(v_url, video_id, 'Checking Vimeo embed URL', fatal=False, errnote=False):
+ return self.url_result(v_url, VimeoIE, url_transparent=True, **info)
embed_url = try_get(attributes, lambda x: x['embed']['url'])
- if embed_url:
- return {
- **info,
- '_type': 'url',
- 'url': embed_url,
- }
+ if embed_url and self._request_webpage(embed_url, video_id, 'Checking embed URL', fatal=False, errnote=False):
+ return self.url_result(embed_url, **info)
post_file = traverse_obj(attributes, 'post_file')
if post_file:
diff --git a/yt_dlp/extractor/polsatgo.py b/yt_dlp/extractor/polsatgo.py
index 1524a1f..1cebb36 100644
--- a/yt_dlp/extractor/polsatgo.py
+++ b/yt_dlp/extractor/polsatgo.py
@@ -1,5 +1,5 @@
-from uuid import uuid4
import json
+import uuid
from .common import InfoExtractor
from ..utils import (
@@ -51,7 +51,7 @@ class PolsatGoIE(InfoExtractor):
}
def _call_api(self, endpoint, media_id, method, params):
- rand_uuid = str(uuid4())
+ rand_uuid = str(uuid.uuid4())
res = self._download_json(
f'https://b2c-mobile.redefine.pl/rpc/{endpoint}/', media_id,
note=f'Downloading {method} JSON metadata',
diff --git a/yt_dlp/extractor/pr0gramm.py b/yt_dlp/extractor/pr0gramm.py
index 66f8a5f..3e0ccba 100644
--- a/yt_dlp/extractor/pr0gramm.py
+++ b/yt_dlp/extractor/pr0gramm.py
@@ -1,5 +1,6 @@
+import datetime as dt
import json
-from urllib.parse import unquote
+import urllib.parse
from .common import InfoExtractor
from ..compat import functools
@@ -114,7 +115,7 @@ class Pr0grammIE(InfoExtractor):
cookies = self._get_cookies(self.BASE_URL)
if 'me' not in cookies:
self._download_webpage(self.BASE_URL, None, 'Refreshing verification information')
- if traverse_obj(cookies, ('me', {lambda x: x.value}, {unquote}, {json.loads}, 'verified')):
+ if traverse_obj(cookies, ('me', {lambda x: x.value}, {urllib.parse.unquote}, {json.loads}, 'verified')):
flags |= 0b00110
return flags
@@ -196,6 +197,7 @@ class Pr0grammIE(InfoExtractor):
'like_count': ('up', {int}),
'dislike_count': ('down', {int}),
'timestamp': ('created', {int}),
+ 'upload_date': ('created', {int}, {dt.date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}),
'thumbnail': ('thumb', {lambda x: urljoin('https://thumb.pr0gramm.com', x)})
}),
}
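The pr0gramm.py change above derives upload_date from the same 'created' Unix timestamp already used for 'timestamp', by chaining dt.date.fromtimestamp with a '%Y%m%d' strftime inside the traversal. Equivalent standalone logic, as a rough sketch with an illustrative function name:

import datetime as dt

def upload_date_from_created(created):
    # 'created' is a Unix timestamp in seconds; yt-dlp's upload_date
    # field uses the YYYYMMDD format.
    return dt.date.fromtimestamp(int(created)).strftime('%Y%m%d')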
diff --git a/yt_dlp/extractor/prosiebensat1.py b/yt_dlp/extractor/prosiebensat1.py
index 46e2e8a..4c33bae 100644
--- a/yt_dlp/extractor/prosiebensat1.py
+++ b/yt_dlp/extractor/prosiebensat1.py
@@ -1,6 +1,6 @@
+import hashlib
import re
-from hashlib import sha1
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
@@ -42,7 +42,7 @@ class ProSiebenSat1BaseIE(InfoExtractor):
'Downloading protocols JSON',
headers=self.geo_verification_headers(), query={
'access_id': self._ACCESS_ID,
- 'client_token': sha1((raw_ct).encode()).hexdigest(),
+ 'client_token': hashlib.sha1((raw_ct).encode()).hexdigest(),
'video_id': clip_id,
}, fatal=False, expected_status=(403,)) or {}
error = protocols.get('error') or {}
@@ -53,7 +53,7 @@ class ProSiebenSat1BaseIE(InfoExtractor):
urls = (self._download_json(
self._V4_BASE_URL + 'urls', clip_id, 'Downloading urls JSON', query={
'access_id': self._ACCESS_ID,
- 'client_token': sha1((raw_ct + server_token + self._SUPPORTED_PROTOCOLS).encode()).hexdigest(),
+ 'client_token': hashlib.sha1((raw_ct + server_token + self._SUPPORTED_PROTOCOLS).encode()).hexdigest(),
'protocols': self._SUPPORTED_PROTOCOLS,
'server_token': server_token,
'video_id': clip_id,
@@ -77,7 +77,7 @@ class ProSiebenSat1BaseIE(InfoExtractor):
if not formats:
source_ids = [compat_str(source['id']) for source in video['sources']]
- client_id = self._SALT[:2] + sha1(''.join([clip_id, self._SALT, self._TOKEN, client_location, self._SALT, self._CLIENT_NAME]).encode('utf-8')).hexdigest()
+ client_id = self._SALT[:2] + hashlib.sha1(''.join([clip_id, self._SALT, self._TOKEN, client_location, self._SALT, self._CLIENT_NAME]).encode('utf-8')).hexdigest()
sources = self._download_json(
'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources' % clip_id,
@@ -96,7 +96,7 @@ class ProSiebenSat1BaseIE(InfoExtractor):
return (bitrate // 1000) if bitrate % 1000 == 0 else bitrate
for source_id in source_ids:
- client_id = self._SALT[:2] + sha1(''.join([self._SALT, clip_id, self._TOKEN, server_id, client_location, source_id, self._SALT, self._CLIENT_NAME]).encode('utf-8')).hexdigest()
+ client_id = self._SALT[:2] + hashlib.sha1(''.join([self._SALT, clip_id, self._TOKEN, server_id, client_location, source_id, self._SALT, self._CLIENT_NAME]).encode('utf-8')).hexdigest()
urls = self._download_json(
'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url' % clip_id,
clip_id, 'Downloading urls JSON', fatal=False, query={
diff --git a/yt_dlp/extractor/radiokapital.py b/yt_dlp/extractor/radiokapital.py
index 8f9737a..5d7d3dd 100644
--- a/yt_dlp/extractor/radiokapital.py
+++ b/yt_dlp/extractor/radiokapital.py
@@ -1,18 +1,14 @@
-from .common import InfoExtractor
-from ..utils import (
- clean_html,
- traverse_obj,
- unescapeHTML,
-)
-
import itertools
-from urllib.parse import urlencode
+import urllib.parse
+
+from .common import InfoExtractor
+from ..utils import clean_html, traverse_obj, unescapeHTML
class RadioKapitalBaseIE(InfoExtractor):
def _call_api(self, resource, video_id, note='Downloading JSON metadata', qs={}):
return self._download_json(
- f'https://www.radiokapital.pl/wp-json/kapital/v1/{resource}?{urlencode(qs)}',
+ f'https://www.radiokapital.pl/wp-json/kapital/v1/{resource}?{urllib.parse.urlencode(qs)}',
video_id, note=note)
def _parse_episode(self, data):
diff --git a/yt_dlp/extractor/rokfin.py b/yt_dlp/extractor/rokfin.py
index 5099f3a..3bc5f3c 100644
--- a/yt_dlp/extractor/rokfin.py
+++ b/yt_dlp/extractor/rokfin.py
@@ -1,8 +1,8 @@
+import datetime as dt
import itertools
import json
import re
import urllib.parse
-from datetime import datetime
from .common import InfoExtractor, SearchInfoExtractor
from ..utils import (
@@ -156,7 +156,7 @@ class RokfinIE(InfoExtractor):
self.raise_login_required('This video is only available to premium users', True, method='cookies')
elif scheduled:
self.raise_no_formats(
- f'Stream is offline; scheduled for {datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
+ f'Stream is offline; scheduled for {dt.datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
video_id=video_id, expected=True)
uploader = traverse_obj(metadata, ('createdBy', 'username'), ('creator', 'username'))
diff --git a/yt_dlp/extractor/sejmpl.py b/yt_dlp/extractor/sejmpl.py
index 29cb015..eb433d2 100644
--- a/yt_dlp/extractor/sejmpl.py
+++ b/yt_dlp/extractor/sejmpl.py
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
from .common import InfoExtractor
from .redge import RedCDNLivxIE
@@ -13,16 +13,16 @@ from ..utils.traversal import traverse_obj
def is_dst(date):
- last_march = datetime.datetime(date.year, 3, 31)
- last_october = datetime.datetime(date.year, 10, 31)
- last_sunday_march = last_march - datetime.timedelta(days=last_march.isoweekday() % 7)
- last_sunday_october = last_october - datetime.timedelta(days=last_october.isoweekday() % 7)
+ last_march = dt.datetime(date.year, 3, 31)
+ last_october = dt.datetime(date.year, 10, 31)
+ last_sunday_march = last_march - dt.timedelta(days=last_march.isoweekday() % 7)
+ last_sunday_october = last_october - dt.timedelta(days=last_october.isoweekday() % 7)
return last_sunday_march.replace(hour=2) <= date <= last_sunday_october.replace(hour=3)
def rfc3339_to_atende(date):
- date = datetime.datetime.fromisoformat(date)
- date = date + datetime.timedelta(hours=1 if is_dst(date) else 0)
+ date = dt.datetime.fromisoformat(date)
+ date = date + dt.timedelta(hours=1 if is_dst(date) else 0)
return int((date.timestamp() - 978307200) * 1000)
diff --git a/yt_dlp/extractor/sharepoint.py b/yt_dlp/extractor/sharepoint.py
new file mode 100644
index 0000000..d4d5af0
--- /dev/null
+++ b/yt_dlp/extractor/sharepoint.py
@@ -0,0 +1,112 @@
+import json
+import urllib.parse
+
+from .common import InfoExtractor
+from ..utils import determine_ext, int_or_none, url_or_none
+from ..utils.traversal import traverse_obj
+
+
+class SharePointIE(InfoExtractor):
+ _BASE_URL_RE = r'https?://[\w-]+\.sharepoint\.com/'
+ _VALID_URL = [
+ rf'{_BASE_URL_RE}:v:/[a-z]/(?:[^/?#]+/)*(?P<id>[^/?#]{{46}})/?(?:$|[?#])',
+ rf'{_BASE_URL_RE}(?!:v:)(?:[^/?#]+/)*stream\.aspx\?(?:[^#]+&)?id=(?P<id>[^&#]+)',
+ ]
+ _TESTS = [{
+ 'url': 'https://lut-my.sharepoint.com/:v:/g/personal/juha_eerola_student_lab_fi/EUrAmrktb4ZMhUcY9J2PqMEBD_9x_l0DyYWVgAvp-TTOMw?e=ZpQOOw',
+ 'md5': '2950821d0d4937a0a76373782093b435',
+ 'info_dict': {
+ 'id': '01EQRS7EKKYCNLSLLPQZGIKRYY6SOY7KGB',
+ 'display_id': 'EUrAmrktb4ZMhUcY9J2PqMEBD_9x_l0DyYWVgAvp-TTOMw',
+ 'ext': 'mp4',
+ 'title': 'CmvpJST',
+ 'duration': 54.567,
+ 'thumbnail': r're:https://.+/thumbnail',
+ 'uploader_id': '8dcec565-a956-4b91-95e5-bacfb8bc015f',
+ },
+ }, {
+ 'url': 'https://greaternyace.sharepoint.com/:v:/s/acementornydrive/ETski5eAfNVEoPRZUAyy1wEBpLgVFYWso5bjbZjfBLlPUg?e=PQUfVb',
+ 'md5': 'c496a01644223273bff12e93e501afd1',
+ 'info_dict': {
+ 'id': '01QI4AVTZ3ESFZPAD42VCKB5CZKAGLFVYB',
+ 'display_id': 'ETski5eAfNVEoPRZUAyy1wEBpLgVFYWso5bjbZjfBLlPUg',
+ 'ext': 'mp4',
+ 'title': '930103681233985536',
+ 'duration': 3797.326,
+ 'thumbnail': r're:https://.+/thumbnail',
+ },
+ }, {
+ 'url': 'https://lut-my.sharepoint.com/personal/juha_eerola_student_lab_fi/_layouts/15/stream.aspx?id=%2Fpersonal%2Fjuha_eerola_student_lab_fi%2FDocuments%2FM-DL%2FCmvpJST.mp4&ga=1&referrer=StreamWebApp.Web&referrerScenario=AddressBarCopied.view',
+ 'info_dict': {
+ 'id': '01EQRS7EKKYCNLSLLPQZGIKRYY6SOY7KGB',
+ 'display_id': '/personal/juha_eerola_student_lab_fi/Documents/M-DL/CmvpJST.mp4',
+ 'ext': 'mp4',
+ 'title': 'CmvpJST',
+ 'duration': 54.567,
+ 'thumbnail': r're:https://.+/thumbnail',
+ 'uploader_id': '8dcec565-a956-4b91-95e5-bacfb8bc015f',
+ },
+ 'skip': 'Session cookies needed',
+ }, {
+ 'url': 'https://izoobasisschool.sharepoint.com/:v:/g/Eaqleq8COVBIvIPvod0U27oBypC6aWOkk8ptuDpmJ6arHw',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://uskudaredutr-my.sharepoint.com/:v:/g/personal/songul_turkaydin_uskudar_edu_tr/EbTf-VRUIbtGuIN73tx1MuwBCHBOmNcWNqSLw61Fd2_o0g?e=n5Vkof',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://epam-my.sharepoint.com/:v:/p/dzmitry_tamashevich/Ec4ZOs-rATZHjFYZWVxjczEB649FCoYFKDV_x3RxZiWAGA?e=4hswgA',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://microsoft.sharepoint.com/:v:/t/MicrosoftSPARKRecordings-MSFTInternal/EWCyeqByVWBAt8wDvNZdV-UB0BvU5YVbKm0UHgdrUlI6dg?e=QbPck6',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ display_id = urllib.parse.unquote(self._match_id(url))
+ webpage, urlh = self._download_webpage_handle(url, display_id)
+ if urllib.parse.urlparse(urlh.url).hostname == 'login.microsoftonline.com':
+ self.raise_login_required(
+ 'Session cookies are required for this URL and can be passed '
+ 'with the --cookies option. The --cookies-from-browser option will not work', method=None)
+
+ video_data = self._search_json(r'g_fileInfo\s*=', webpage, 'player config', display_id)
+ video_id = video_data['VroomItemId']
+
+ parsed_url = urllib.parse.urlparse(video_data['.transformUrl'])
+ base_media_url = urllib.parse.urlunparse(parsed_url._replace(
+ path=urllib.parse.urljoin(f'{parsed_url.path}/', '../videomanifest'),
+ query=urllib.parse.urlencode({
+ **urllib.parse.parse_qs(parsed_url.query),
+ 'cTag': video_data['.ctag'],
+ 'action': 'Access',
+ 'part': 'index',
+ }, doseq=True)))
+
+ # Web player adds more params to the format URLs but we still get all formats without them
+ formats = self._extract_mpd_formats(
+ base_media_url, video_id, mpd_id='dash', query={'format': 'dash'}, fatal=False)
+ for hls_type in ('hls', 'hls-vnext'):
+ formats.extend(self._extract_m3u8_formats(
+ base_media_url, video_id, 'mp4', m3u8_id=hls_type,
+ query={'format': hls_type}, fatal=False, quality=-2))
+
+ if video_url := traverse_obj(video_data, ('downloadUrl', {url_or_none})):
+ formats.append({
+ 'url': video_url,
+ 'ext': determine_ext(video_data.get('extension') or video_data.get('name')),
+ 'quality': 1,
+ 'format_id': 'source',
+ 'filesize': int_or_none(video_data.get('size')),
+ 'vcodec': 'none' if video_data.get('isAudio') is True else None,
+ })
+
+ return {
+ 'id': video_id,
+ 'formats': formats,
+ 'title': video_data.get('title') or video_data.get('displayName'),
+ 'display_id': display_id,
+ 'uploader_id': video_data.get('authorId'),
+ 'duration': traverse_obj(video_data, (
+ 'MediaServiceFastMetadata', {json.loads}, 'media', 'duration', {lambda x: x / 10000000})),
+ 'thumbnail': url_or_none(video_data.get('thumbnailUrl')),
+ }
diff --git a/yt_dlp/extractor/sonyliv.py b/yt_dlp/extractor/sonyliv.py
index 4379572..7c914ac 100644
--- a/yt_dlp/extractor/sonyliv.py
+++ b/yt_dlp/extractor/sonyliv.py
@@ -1,4 +1,5 @@
-import datetime
+import datetime as dt
+import itertools
import json
import math
import random
@@ -12,8 +13,8 @@ from ..utils import (
int_or_none,
jwt_decode_hs256,
try_call,
- try_get,
)
+from ..utils.traversal import traverse_obj
class SonyLIVIE(InfoExtractor):
@@ -93,7 +94,7 @@ class SonyLIVIE(InfoExtractor):
'mobileNumber': username,
'channelPartnerID': 'MSMIND',
'country': 'IN',
- 'timestamp': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
+ 'timestamp': dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
'otpSize': 6,
'loginType': 'REGISTERORSIGNIN',
'isMobileMandatory': True,
@@ -110,7 +111,7 @@ class SonyLIVIE(InfoExtractor):
'otp': self._get_tfa_info('OTP'),
'dmaId': 'IN',
'ageConfirmation': True,
- 'timestamp': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
+ 'timestamp': dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%MZ'),
'isMobileMandatory': True,
}).encode())
if otp_verify_json['resultCode'] == 'KO':
@@ -183,17 +184,21 @@ class SonyLIVIE(InfoExtractor):
class SonyLIVSeriesIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?sonyliv\.com/shows/[^/?#&]+-(?P<id>\d{10})$'
+ _VALID_URL = r'https?://(?:www\.)?sonyliv\.com/shows/[^/?#&]+-(?P<id>\d{10})/?(?:$|[?#])'
_TESTS = [{
'url': 'https://www.sonyliv.com/shows/adaalat-1700000091',
- 'playlist_mincount': 456,
+ 'playlist_mincount': 452,
'info_dict': {
'id': '1700000091',
},
+ }, {
+ 'url': 'https://www.sonyliv.com/shows/beyhadh-1700000007/',
+ 'playlist_mincount': 358,
+ 'info_dict': {
+ 'id': '1700000007',
+ },
}]
- _API_SHOW_URL = "https://apiv2.sonyliv.com/AGL/1.9/R/ENG/WEB/IN/DL/DETAIL/{}?kids_safe=false&from=0&to=49"
- _API_EPISODES_URL = "https://apiv2.sonyliv.com/AGL/1.4/R/ENG/WEB/IN/CONTENT/DETAIL/BUNDLE/{}?from=0&to=1000&orderBy=episodeNumber&sortOrder=asc"
- _API_SECURITY_URL = 'https://apiv2.sonyliv.com/AGL/1.4/A/ENG/WEB/ALL/GETTOKEN'
+ _API_BASE = 'https://apiv2.sonyliv.com/AGL'
def _entries(self, show_id):
headers = {
@@ -201,19 +206,34 @@ class SonyLIVSeriesIE(InfoExtractor):
'Referer': 'https://www.sonyliv.com',
}
headers['security_token'] = self._download_json(
- self._API_SECURITY_URL, video_id=show_id, headers=headers,
- note='Downloading security token')['resultObj']
- seasons = try_get(
- self._download_json(self._API_SHOW_URL.format(show_id), video_id=show_id, headers=headers),
- lambda x: x['resultObj']['containers'][0]['containers'], list)
- for season in seasons or []:
- season_id = season['id']
- episodes = try_get(
- self._download_json(self._API_EPISODES_URL.format(season_id), video_id=season_id, headers=headers),
- lambda x: x['resultObj']['containers'][0]['containers'], list)
- for episode in episodes or []:
- video_id = episode.get('id')
- yield self.url_result('sonyliv:%s' % video_id, ie=SonyLIVIE.ie_key(), video_id=video_id)
+ f'{self._API_BASE}/1.4/A/ENG/WEB/ALL/GETTOKEN', show_id,
+ 'Downloading security token', headers=headers)['resultObj']
+ seasons = traverse_obj(self._download_json(
+ f'{self._API_BASE}/1.9/R/ENG/WEB/IN/DL/DETAIL/{show_id}', show_id,
+ 'Downloading series JSON', headers=headers, query={
+ 'kids_safe': 'false',
+ 'from': '0',
+ 'to': '49',
+ }), ('resultObj', 'containers', 0, 'containers', lambda _, v: int_or_none(v['id'])))
+ for season in seasons:
+ season_id = str(season['id'])
+ note = traverse_obj(season, ('metadata', 'title', {str})) or 'season'
+ cursor = 0
+ for page_num in itertools.count(1):
+ episodes = traverse_obj(self._download_json(
+ f'{self._API_BASE}/1.4/R/ENG/WEB/IN/CONTENT/DETAIL/BUNDLE/{season_id}',
+ season_id, f'Downloading {note} page {page_num} JSON', headers=headers, query={
+ 'from': str(cursor),
+ 'to': str(cursor + 99),
+ 'orderBy': 'episodeNumber',
+ 'sortOrder': 'asc',
+ }), ('resultObj', 'containers', 0, 'containers', lambda _, v: int_or_none(v['id'])))
+ if not episodes:
+ break
+ for episode in episodes:
+ video_id = str(episode['id'])
+ yield self.url_result(f'sonyliv:{video_id}', SonyLIVIE, video_id)
+ cursor += 100
def _real_extract(self, url):
show_id = self._match_id(url)
diff --git a/yt_dlp/extractor/soundcloud.py b/yt_dlp/extractor/soundcloud.py
index a7c2afd..c9ed645 100644
--- a/yt_dlp/extractor/soundcloud.py
+++ b/yt_dlp/extractor/soundcloud.py
@@ -1,30 +1,27 @@
import itertools
-import re
import json
-# import random
+import re
-from .common import (
- InfoExtractor,
- SearchInfoExtractor
-)
+from .common import InfoExtractor, SearchInfoExtractor
from ..compat import compat_str
-from ..networking import HEADRequest, Request
+from ..networking import HEADRequest
from ..networking.exceptions import HTTPError
from ..utils import (
- error_to_compat_str,
+ KNOWN_EXTENSIONS,
ExtractorError,
+ error_to_compat_str,
float_or_none,
int_or_none,
- KNOWN_EXTENSIONS,
mimetype2ext,
parse_qs,
str_or_none,
- try_get,
+ try_call,
unified_timestamp,
update_url_query,
url_or_none,
urlhandle_detect_ext,
)
+from ..utils.traversal import traverse_obj
class SoundcloudEmbedIE(InfoExtractor):
@@ -54,7 +51,6 @@ class SoundcloudBaseIE(InfoExtractor):
_API_AUTH_QUERY_TEMPLATE = '?client_id=%s'
_API_AUTH_URL_PW = 'https://api-auth.soundcloud.com/web-auth/sign-in/password%s'
_API_VERIFY_AUTH_TOKEN = 'https://api-auth.soundcloud.com/connect/session%s'
- _access_token = None
_HEADERS = {}
_IMAGE_REPL_RE = r'-([0-9a-z]+)\.jpg'
@@ -112,21 +108,31 @@ class SoundcloudBaseIE(InfoExtractor):
def _initialize_pre_login(self):
self._CLIENT_ID = self.cache.load('soundcloud', 'client_id') or 'a3e059563d7fd3372b49b37f00a00bcf'
+ def _verify_oauth_token(self, token):
+ if self._request_webpage(
+ self._API_VERIFY_AUTH_TOKEN % (self._API_AUTH_QUERY_TEMPLATE % self._CLIENT_ID),
+ None, note='Verifying login token...', fatal=False,
+ data=json.dumps({'session': {'access_token': token}}).encode()):
+ self._HEADERS['Authorization'] = f'OAuth {token}'
+ self.report_login()
+ else:
+ self.report_warning('Provided authorization token is invalid. Continuing as guest')
+
+ def _real_initialize(self):
+ if self._HEADERS:
+ return
+ if token := try_call(lambda: self._get_cookies(self._BASE_URL)['oauth_token'].value):
+ self._verify_oauth_token(token)
+
def _perform_login(self, username, password):
if username != 'oauth':
- self.report_warning(
+ raise ExtractorError(
'Login using username and password is not currently supported. '
- 'Use "--username oauth --password <oauth_token>" to login using an oauth token')
- self._access_token = password
- query = self._API_AUTH_QUERY_TEMPLATE % self._CLIENT_ID
- payload = {'session': {'access_token': self._access_token}}
- token_verification = Request(self._API_VERIFY_AUTH_TOKEN % query, json.dumps(payload).encode('utf-8'))
- response = self._download_json(token_verification, None, note='Verifying login token...', fatal=False)
- if response is not False:
- self._HEADERS = {'Authorization': 'OAuth ' + self._access_token}
- self.report_login()
- else:
- self.report_warning('Provided authorization token seems to be invalid. Continue as guest')
+ 'Use "--username oauth --password <oauth_token>" to login using an oauth token, '
+ f'or else {self._login_hint(method="cookies")}', expected=True)
+ if self._HEADERS:
+ return
+ self._verify_oauth_token(password)
r'''
def genDevId():
@@ -147,14 +153,17 @@ class SoundcloudBaseIE(InfoExtractor):
'user_agent': self._USER_AGENT
}
- query = self._API_AUTH_QUERY_TEMPLATE % self._CLIENT_ID
- login = sanitized_Request(self._API_AUTH_URL_PW % query, json.dumps(payload).encode('utf-8'))
- response = self._download_json(login, None)
- self._access_token = response.get('session').get('access_token')
- if not self._access_token:
- self.report_warning('Unable to get access token, login may has failed')
- else:
- self._HEADERS = {'Authorization': 'OAuth ' + self._access_token}
+ response = self._download_json(
+ self._API_AUTH_URL_PW % (self._API_AUTH_QUERY_TEMPLATE % self._CLIENT_ID),
+ None, note='Verifying login token...', fatal=False,
+ data=json.dumps(payload).encode())
+
+ if token := traverse_obj(response, ('session', 'access_token', {str})):
+ self._HEADERS['Authorization'] = f'OAuth {token}'
+ self.report_login()
+ return
+
+ raise ExtractorError('Unable to get access token, login may have failed', expected=True)
'''
# signature generation
@@ -217,6 +226,7 @@ class SoundcloudBaseIE(InfoExtractor):
'filesize': int_or_none(urlh.headers.get('Content-Length')),
'url': format_url,
'quality': 10,
+ 'format_note': 'Original',
})
def invalid_url(url):
@@ -233,9 +243,13 @@ class SoundcloudBaseIE(InfoExtractor):
format_id_list.append(protocol)
ext = f.get('ext')
if ext == 'aac':
- f['abr'] = '256'
+ f.update({
+ 'abr': 256,
+ 'quality': 5,
+ 'format_note': 'Premium',
+ })
for k in ('ext', 'abr'):
- v = f.get(k)
+ v = str_or_none(f.get(k))
if v:
format_id_list.append(v)
preview = is_preview or re.search(r'/(?:preview|playlist)/0/30/', f['url'])
@@ -256,16 +270,25 @@ class SoundcloudBaseIE(InfoExtractor):
formats.append(f)
# New API
- transcodings = try_get(
- info, lambda x: x['media']['transcodings'], list) or []
- for t in transcodings:
- if not isinstance(t, dict):
- continue
- format_url = url_or_none(t.get('url'))
- if not format_url:
- continue
- stream = None if extract_flat else self._download_json(
- format_url, track_id, query=query, fatal=False, headers=self._HEADERS)
+ for t in traverse_obj(info, ('media', 'transcodings', lambda _, v: url_or_none(v['url']))):
+ if extract_flat:
+ break
+ format_url = t['url']
+ stream = None
+
+ for retry in self.RetryManager(fatal=False):
+ try:
+ stream = self._download_json(format_url, track_id, query=query, headers=self._HEADERS)
+ except ExtractorError as e:
+ if isinstance(e.cause, HTTPError) and e.cause.status == 429:
+ self.report_warning(
+ 'You have reached the API rate limit, which is ~600 requests per '
+ '10 minutes. Use the --extractor-retries and --retry-sleep options '
+ 'to configure an appropriate retry count and wait time', only_once=True)
+ retry.error = e.cause
+ else:
+ self.report_warning(e.msg)
+
if not isinstance(stream, dict):
continue
stream_url = url_or_none(stream.get('url'))
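The transcoding loop above leans on yt-dlp's RetryManager to retry only when the API answers HTTP 429; stripped of the extractor machinery, the pattern it follows is roughly this (hypothetical helper, for illustration only):

import time

def fetch_with_backoff(fetch, is_rate_limited, retries=3, sleep=10):
    # retry only when the caller says the failure was a rate limit;
    # any other error gives up immediately and the result stays None
    for attempt in range(retries + 1):
        try:
            return fetch()
        except Exception as e:  # illustrative only; the extractor checks HTTPError.status == 429
            if not is_rate_limited(e) or attempt == retries:
                return None
            time.sleep(sleep)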
diff --git a/yt_dlp/extractor/telewebion.py b/yt_dlp/extractor/telewebion.py
index 9378ed0..5fdcddd 100644
--- a/yt_dlp/extractor/telewebion.py
+++ b/yt_dlp/extractor/telewebion.py
@@ -1,8 +1,7 @@
from __future__ import annotations
-
+import functools
import json
-from functools import partial
-from textwrap import dedent
+import textwrap
from .common import InfoExtractor
from ..utils import ExtractorError, format_field, int_or_none, parse_iso8601
@@ -10,7 +9,7 @@ from ..utils.traversal import traverse_obj
def _fmt_url(url):
- return partial(format_field, template=url, default=None)
+ return functools.partial(format_field, template=url, default=None)
class TelewebionIE(InfoExtractor):
@@ -88,7 +87,7 @@ class TelewebionIE(InfoExtractor):
if not video_id.startswith('0x'):
video_id = hex(int(video_id))
- episode_data = self._call_graphql_api('getEpisodeDetail', video_id, dedent('''
+ episode_data = self._call_graphql_api('getEpisodeDetail', video_id, textwrap.dedent('''
queryEpisode(filter: {EpisodeID: $EpisodeId}, first: 1) {
title
program {
@@ -127,7 +126,7 @@ class TelewebionIE(InfoExtractor):
'formats': (
'channel', 'descriptor', {str},
{_fmt_url(f'https://cdna.telewebion.com/%s/episode/{video_id}/playlist.m3u8')},
- {partial(self._extract_m3u8_formats, video_id=video_id, ext='mp4', m3u8_id='hls')}),
+ {functools.partial(self._extract_m3u8_formats, video_id=video_id, ext='mp4', m3u8_id='hls')}),
}))
info_dict['id'] = video_id
return info_dict
diff --git a/yt_dlp/extractor/tenplay.py b/yt_dlp/extractor/tenplay.py
index a98275d..11cc570 100644
--- a/yt_dlp/extractor/tenplay.py
+++ b/yt_dlp/extractor/tenplay.py
@@ -1,7 +1,7 @@
import base64
+import datetime as dt
import functools
import itertools
-from datetime import datetime
from .common import InfoExtractor
from ..networking import HEADRequest
@@ -70,7 +70,7 @@ class TenPlayIE(InfoExtractor):
username, password = self._get_login_info()
if username is None or password is None:
self.raise_login_required('Your 10play account\'s details must be provided with --username and --password.')
- _timestamp = datetime.now().strftime('%Y%m%d000000')
+ _timestamp = dt.datetime.now().strftime('%Y%m%d000000')
_auth_header = base64.b64encode(_timestamp.encode('ascii')).decode('ascii')
data = self._download_json('https://10play.com.au/api/user/auth', video_id, 'Getting bearer token', headers={
'X-Network-Ten-Auth': _auth_header,
diff --git a/yt_dlp/extractor/thisoldhouse.py b/yt_dlp/extractor/thisoldhouse.py
index 15f8380..fbc12d5 100644
--- a/yt_dlp/extractor/thisoldhouse.py
+++ b/yt_dlp/extractor/thisoldhouse.py
@@ -1,5 +1,6 @@
import json
+from .brightcove import BrightcoveNewIE
from .common import InfoExtractor
from .zype import ZypeIE
from ..networking import HEADRequest
@@ -8,6 +9,7 @@ from ..utils import (
ExtractorError,
filter_dict,
parse_qs,
+ smuggle_url,
try_call,
urlencode_postdata,
)
@@ -17,24 +19,44 @@ class ThisOldHouseIE(InfoExtractor):
_NETRC_MACHINE = 'thisoldhouse'
_VALID_URL = r'https?://(?:www\.)?thisoldhouse\.com/(?:watch|how-to|tv-episode|(?:[^/?#]+/)?\d+)/(?P<id>[^/?#]+)'
_TESTS = [{
+ # Unresolved Brightcove URL embed (formerly Zype), free
'url': 'https://www.thisoldhouse.com/furniture/21017078/how-to-build-a-storage-bench',
'info_dict': {
- 'id': '5dcdddf673c3f956ef5db202',
+ 'id': '6325298523112',
'ext': 'mp4',
'title': 'How to Build a Storage Bench',
'description': 'In the workshop, Tom Silva and Kevin O\'Connor build a storage bench for an entryway.',
- 'timestamp': 1442548800,
- 'upload_date': '20150918',
- 'duration': 674,
- 'view_count': int,
- 'average_rating': 0,
- 'thumbnail': r're:^https?://.*\.jpg\?\d+$',
- 'display_id': 'how-to-build-a-storage-bench',
+ 'timestamp': 1681793639,
+ 'upload_date': '20230418',
+ 'duration': 674.54,
+ 'tags': 'count:11',
+ 'uploader_id': '6314471934001',
+ 'thumbnail': r're:^https?://.*\.jpg',
},
'params': {
'skip_download': True,
},
}, {
+ # Brightcove embed, authwalled
+ 'url': 'https://www.thisoldhouse.com/glen-ridge-generational/99537/s45-e17-multi-generational',
+ 'info_dict': {
+ 'id': '6349675446112',
+ 'ext': 'mp4',
+ 'title': 'E17 | Glen Ridge Generational | Multi-Generational',
+ 'description': 'md5:53c6bc2e8031f3033d693d9a3563222c',
+ 'timestamp': 1711382202,
+ 'upload_date': '20240325',
+ 'duration': 1422.229,
+ 'tags': 'count:13',
+ 'uploader_id': '6314471934001',
+ 'thumbnail': r're:^https?://.*\.jpg',
+ },
+ 'expected_warnings': ['Login with password is not supported for this website'],
+ 'params': {
+ 'skip_download': True,
+ },
+ 'skip': 'Requires subscription',
+ }, {
# Page no longer has video
'url': 'https://www.thisoldhouse.com/watch/arlington-arts-crafts-arts-and-crafts-class-begins',
'only_matching': True,
@@ -98,7 +120,15 @@ class ThisOldHouseIE(InfoExtractor):
video_url, video_id = self._search_regex(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?thisoldhouse\.(?:chorus\.build|com)/videos/zype/([0-9a-f]{24})[^\'"]*)[\'"]',
- webpage, 'video url', group=(1, 2))
- video_url = self._request_webpage(HEADRequest(video_url), video_id, 'Resolving Zype URL').url
+ webpage, 'zype url', group=(1, 2), default=(None, None))
+ if video_url:
+ video_url = self._request_webpage(HEADRequest(video_url), video_id, 'Resolving Zype URL').url
+ return self.url_result(video_url, ZypeIE, video_id)
- return self.url_result(video_url, ZypeIE, video_id)
+ video_url, video_id = self._search_regex([
+ r'<iframe[^>]+src=[\'"]((?:https?:)?//players\.brightcove\.net/\d+/\w+/index\.html\?videoId=(\d+))',
+ r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)thisoldhouse\.com/videos/brightcove/(\d+))'],
+ webpage, 'iframe url', group=(1, 2))
+ if not parse_qs(video_url).get('videoId'):
+ video_url = self._request_webpage(HEADRequest(video_url), video_id, 'Resolving Brightcove URL').url
+ return self.url_result(smuggle_url(video_url, {'referrer': url}), BrightcoveNewIE, video_id)
diff --git a/yt_dlp/extractor/tiktok.py b/yt_dlp/extractor/tiktok.py
index aa83567..3f5261a 100644
--- a/yt_dlp/extractor/tiktok.py
+++ b/yt_dlp/extractor/tiktok.py
@@ -4,6 +4,7 @@ import random
import re
import string
import time
+import uuid
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
@@ -30,18 +31,64 @@ from ..utils import (
class TikTokBaseIE(InfoExtractor):
- _APP_VERSIONS = [('26.1.3', '260103'), ('26.1.2', '260102'), ('26.1.1', '260101'), ('25.6.2', '250602')]
- _WORKING_APP_VERSION = None
- _APP_NAME = 'trill'
- _AID = 1180
_UPLOADER_URL_FORMAT = 'https://www.tiktok.com/@%s'
_WEBPAGE_HOST = 'https://www.tiktok.com/'
QUALITIES = ('360p', '540p', '720p', '1080p')
+ _APP_INFO_DEFAULTS = {
+ # unique "install id"
+ 'iid': None,
+ # TikTok (KR/PH/TW/TH/VN) = trill, TikTok (rest of world) = musical_ly, Douyin = aweme
+ 'app_name': 'musical_ly',
+ 'app_version': '34.1.2',
+ 'manifest_app_version': '2023401020',
+ # "app id": aweme = 1128, trill = 1180, musical_ly = 1233, universal = 0
+ 'aid': '0',
+ }
+ _KNOWN_APP_INFO = [
+ '7351144126450059040',
+ '7351149742343391009',
+ '7351153174894626592',
+ ]
+ _APP_INFO_POOL = None
+ _APP_INFO = None
+ _APP_USER_AGENT = None
+
@property
def _API_HOSTNAME(self):
return self._configuration_arg(
- 'api_hostname', ['api16-normal-c-useast1a.tiktokv.com'], ie_key=TikTokIE)[0]
+ 'api_hostname', ['api22-normal-c-useast2a.tiktokv.com'], ie_key=TikTokIE)[0]
+
+ def _get_next_app_info(self):
+ if self._APP_INFO_POOL is None:
+ defaults = {
+ key: self._configuration_arg(key, [default], ie_key=TikTokIE)[0]
+ for key, default in self._APP_INFO_DEFAULTS.items()
+ if key != 'iid'
+ }
+ app_info_list = (
+ self._configuration_arg('app_info', ie_key=TikTokIE)
+ or random.sample(self._KNOWN_APP_INFO, len(self._KNOWN_APP_INFO)))
+ self._APP_INFO_POOL = [
+ {**defaults, **dict(
+ (k, v) for k, v in zip(self._APP_INFO_DEFAULTS, app_info.split('/')) if v
+ )} for app_info in app_info_list
+ ]
+
+ if not self._APP_INFO_POOL:
+ return False
+
+ self._APP_INFO = self._APP_INFO_POOL.pop(0)
+
+ app_name = self._APP_INFO['app_name']
+ version = self._APP_INFO['manifest_app_version']
+ if app_name == 'musical_ly':
+ package = f'com.zhiliaoapp.musically/{version}'
+ else: # trill, aweme
+ package = f'com.ss.android.ugc.{app_name}/{version}'
+ self._APP_USER_AGENT = f'{package} (Linux; U; Android 13; en_US; Pixel 7; Build/TD1A.220804.031; Cronet/58.0.2991.0)'
+
+ return True
@staticmethod
def _create_url(user_id, video_id):
@@ -58,7 +105,7 @@ class TikTokBaseIE(InfoExtractor):
'universal data', display_id, end_pattern=r'</script>', default={}),
('__DEFAULT_SCOPE__', {dict})) or {}
- def _call_api_impl(self, ep, query, manifest_app_version, video_id, fatal=True,
+ def _call_api_impl(self, ep, query, video_id, fatal=True,
note='Downloading API JSON', errnote='Unable to download API page'):
self._set_cookie(self._API_HOSTNAME, 'odin_tt', ''.join(random.choices('0123456789abcdef', k=160)))
webpage_cookies = self._get_cookies(self._WEBPAGE_HOST)
@@ -67,80 +114,85 @@ class TikTokBaseIE(InfoExtractor):
return self._download_json(
'https://%s/aweme/v1/%s/' % (self._API_HOSTNAME, ep), video_id=video_id,
fatal=fatal, note=note, errnote=errnote, headers={
- 'User-Agent': f'com.ss.android.ugc.{self._APP_NAME}/{manifest_app_version} (Linux; U; Android 13; en_US; Pixel 7; Build/TD1A.220804.031; Cronet/58.0.2991.0)',
+ 'User-Agent': self._APP_USER_AGENT,
'Accept': 'application/json',
}, query=query)
- def _build_api_query(self, query, app_version, manifest_app_version):
+ def _build_api_query(self, query):
return {
**query,
- 'version_name': app_version,
- 'version_code': manifest_app_version,
- 'build_number': app_version,
- 'manifest_version_code': manifest_app_version,
- 'update_version_code': manifest_app_version,
- 'openudid': ''.join(random.choices('0123456789abcdef', k=16)),
- 'uuid': ''.join(random.choices(string.digits, k=16)),
- '_rticket': int(time.time() * 1000),
- 'ts': int(time.time()),
- 'device_brand': 'Google',
- 'device_type': 'Pixel 7',
'device_platform': 'android',
+ 'os': 'android',
+ 'ssmix': 'a',
+ '_rticket': int(time.time() * 1000),
+ 'cdid': str(uuid.uuid4()),
+ 'channel': 'googleplay',
+ 'aid': self._APP_INFO['aid'],
+ 'app_name': self._APP_INFO['app_name'],
+ 'version_code': ''.join((f'{int(v):02d}' for v in self._APP_INFO['app_version'].split('.'))),
+ 'version_name': self._APP_INFO['app_version'],
+ 'manifest_version_code': self._APP_INFO['manifest_app_version'],
+ 'update_version_code': self._APP_INFO['manifest_app_version'],
+ 'ab_version': self._APP_INFO['app_version'],
'resolution': '1080*2400',
'dpi': 420,
- 'os_version': '13',
+ 'device_type': 'Pixel 7',
+ 'device_brand': 'Google',
+ 'language': 'en',
'os_api': '29',
- 'carrier_region': 'US',
+ 'os_version': '13',
+ 'ac': 'wifi',
+ 'is_pad': '0',
+ 'current_region': 'US',
+ 'app_type': 'normal',
'sys_region': 'US',
- 'region': 'US',
- 'app_name': self._APP_NAME,
- 'app_language': 'en',
- 'language': 'en',
+ 'last_install_time': int(time.time()) - random.randint(86400, 1123200),
'timezone_name': 'America/New_York',
+ 'residence': 'US',
+ 'app_language': 'en',
'timezone_offset': '-14400',
- 'channel': 'googleplay',
- 'ac': 'wifi',
- 'mcc_mnc': '310260',
- 'is_my_cn': 0,
- 'aid': self._AID,
- 'ssmix': 'a',
- 'as': 'a1qwert123',
- 'cp': 'cbfhckdckkde1',
+ 'host_abi': 'armeabi-v7a',
+ 'locale': 'en',
+ 'ac2': 'wifi5g',
+ 'uoo': '1',
+ 'carrier_region': 'US',
+ 'op_region': 'US',
+ 'build_number': self._APP_INFO['app_version'],
+ 'region': 'US',
+ 'ts': int(time.time()),
+ 'iid': self._APP_INFO['iid'],
+ 'device_id': random.randint(7250000000000000000, 7351147085025500000),
+ 'openudid': ''.join(random.choices('0123456789abcdef', k=16)),
}
def _call_api(self, ep, query, video_id, fatal=True,
note='Downloading API JSON', errnote='Unable to download API page'):
- if not self._WORKING_APP_VERSION:
- app_version = self._configuration_arg('app_version', [''], ie_key=TikTokIE.ie_key())[0]
- manifest_app_version = self._configuration_arg('manifest_app_version', [''], ie_key=TikTokIE.ie_key())[0]
- if app_version and manifest_app_version:
- self._WORKING_APP_VERSION = (app_version, manifest_app_version)
- self.write_debug('Imported app version combo from extractor arguments')
- elif app_version or manifest_app_version:
- self.report_warning('Only one of the two required version params are passed as extractor arguments', only_once=True)
-
- if self._WORKING_APP_VERSION:
- app_version, manifest_app_version = self._WORKING_APP_VERSION
- real_query = self._build_api_query(query, app_version, manifest_app_version)
- return self._call_api_impl(ep, real_query, manifest_app_version, video_id, fatal, note, errnote)
-
- for count, (app_version, manifest_app_version) in enumerate(self._APP_VERSIONS, start=1):
- real_query = self._build_api_query(query, app_version, manifest_app_version)
+ if not self._APP_INFO and not self._get_next_app_info():
+ message = 'No working app info is available'
+ if fatal:
+ raise ExtractorError(message, expected=True)
+ else:
+ self.report_warning(message)
+ return
+
+ max_tries = len(self._APP_INFO_POOL) + 1 # _APP_INFO_POOL + _APP_INFO
+ for count in itertools.count(1):
+ self.write_debug(str(self._APP_INFO))
+ real_query = self._build_api_query(query)
try:
- res = self._call_api_impl(ep, real_query, manifest_app_version, video_id, fatal, note, errnote)
- self._WORKING_APP_VERSION = (app_version, manifest_app_version)
- return res
+ return self._call_api_impl(ep, real_query, video_id, fatal, note, errnote)
except ExtractorError as e:
if isinstance(e.cause, json.JSONDecodeError) and e.cause.pos == 0:
- if count == len(self._APP_VERSIONS):
+ message = str(e.cause or e.msg)
+ if not self._get_next_app_info():
if fatal:
- raise e
+ raise
else:
- self.report_warning(str(e.cause or e.msg))
+ self.report_warning(message)
return
- self.report_warning('%s. Retrying... (attempt %s of %s)' % (str(e.cause or e.msg), count, len(self._APP_VERSIONS)))
+ self.report_warning(f'{message}. Retrying... (attempt {count} of {max_tries})')
continue
- raise e
+ raise
def _extract_aweme_app(self, aweme_id):
feed_list = self._call_api(
@@ -223,6 +275,7 @@ class TikTokBaseIE(InfoExtractor):
def extract_addr(addr, add_meta={}):
parsed_meta, res = parse_url_key(addr.get('url_key', ''))
+ is_bytevc2 = parsed_meta.get('vcodec') == 'bytevc2'
if res:
known_resolutions.setdefault(res, {}).setdefault('height', int_or_none(addr.get('height')))
known_resolutions[res].setdefault('width', int_or_none(addr.get('width')))
@@ -235,8 +288,11 @@ class TikTokBaseIE(InfoExtractor):
'acodec': 'aac',
'source_preference': -2 if 'aweme/v1' in url else -1, # Downloads from API might get blocked
**add_meta, **parsed_meta,
+ # bytevc2 is bytedance's proprietary (unplayable) video codec
+ 'preference': -100 if is_bytevc2 else -1,
'format_note': join_nonempty(
- add_meta.get('format_note'), '(API)' if 'aweme/v1' in url else None, delim=' '),
+ add_meta.get('format_note'), '(API)' if 'aweme/v1' in url else None,
+ '(UNPLAYABLE)' if is_bytevc2 else None, delim=' '),
**audio_meta(url),
} for url in addr.get('url_list') or []]
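A quick illustration of the `version_code` derivation in `_build_api_query` above: each dotted component of `app_version` is zero-padded to two digits and concatenated (value taken from the defaults shown earlier):

app_version = '34.1.2'
version_code = ''.join(f'{int(v):02d}' for v in app_version.split('.'))
assert version_code == '340102'  # '34' + '01' + '02'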
diff --git a/yt_dlp/extractor/twitch.py b/yt_dlp/extractor/twitch.py
index c55786a..80cba09 100644
--- a/yt_dlp/extractor/twitch.py
+++ b/yt_dlp/extractor/twitch.py
@@ -191,17 +191,25 @@ class TwitchBaseIE(InfoExtractor):
}] if thumbnail else None
def _extract_twitch_m3u8_formats(self, path, video_id, token, signature):
- return self._extract_m3u8_formats(
+ formats = self._extract_m3u8_formats(
f'{self._USHER_BASE}/{path}/{video_id}.m3u8', video_id, 'mp4', query={
'allow_source': 'true',
'allow_audio_only': 'true',
'allow_spectre': 'true',
'p': random.randint(1000000, 10000000),
+ 'platform': 'web',
'player': 'twitchweb',
+ 'supported_codecs': 'av1,h265,h264',
'playlist_include_framerate': 'true',
'sig': signature,
'token': token,
})
+ for fmt in formats:
+ if fmt.get('vcodec') and fmt['vcodec'].startswith('av01'):
+ # mpegts does not yet have proper support for av1
+ fmt['downloader_options'] = {'ffmpeg_args_out': ['-f', 'mp4']}
+
+ return formats
class TwitchVodIE(TwitchBaseIE):
diff --git a/yt_dlp/extractor/vk.py b/yt_dlp/extractor/vk.py
index e4a78c2..7e3a3a9 100644
--- a/yt_dlp/extractor/vk.py
+++ b/yt_dlp/extractor/vk.py
@@ -707,6 +707,7 @@ class VKWallPostIE(VKBaseIE):
class VKPlayBaseIE(InfoExtractor):
+ _BASE_URL_RE = r'https?://(?:vkplay\.live|live\.vkplay\.ru)/'
_RESOLUTIONS = {
'tiny': '256x144',
'lowest': '426x240',
@@ -765,7 +766,7 @@ class VKPlayBaseIE(InfoExtractor):
class VKPlayIE(VKPlayBaseIE):
- _VALID_URL = r'https?://vkplay\.live/(?P<username>[^/#?]+)/record/(?P<id>[a-f0-9-]+)'
+ _VALID_URL = rf'{VKPlayBaseIE._BASE_URL_RE}(?P<username>[^/#?]+)/record/(?P<id>[\da-f-]+)'
_TESTS = [{
'url': 'https://vkplay.live/zitsmann/record/f5e6e3b5-dc52-4d14-965d-0680dd2882da',
'info_dict': {
@@ -776,13 +777,16 @@ class VKPlayIE(VKPlayBaseIE):
'uploader_id': '13159830',
'release_timestamp': 1683461378,
'release_date': '20230507',
- 'thumbnail': r're:https://images.vkplay.live/public_video_stream/record/f5e6e3b5-dc52-4d14-965d-0680dd2882da/preview\?change_time=\d+',
+ 'thumbnail': r're:https://[^/]+/public_video_stream/record/f5e6e3b5-dc52-4d14-965d-0680dd2882da/preview',
'duration': 10608,
'view_count': int,
'like_count': int,
'categories': ['Atomic Heart'],
},
'params': {'skip_download': 'm3u8'},
+ }, {
+ 'url': 'https://live.vkplay.ru/lebwa/record/33a4e4ce-e3ef-49db-bb14-f006cc6fabc9/records',
+ 'only_matching': True,
}]
def _real_extract(self, url):
@@ -802,7 +806,7 @@ class VKPlayIE(VKPlayBaseIE):
class VKPlayLiveIE(VKPlayBaseIE):
- _VALID_URL = r'https?://vkplay\.live/(?P<id>[^/#?]+)/?(?:[#?]|$)'
+ _VALID_URL = rf'{VKPlayBaseIE._BASE_URL_RE}(?P<id>[^/#?]+)/?(?:[#?]|$)'
_TESTS = [{
'url': 'https://vkplay.live/bayda',
'info_dict': {
@@ -813,7 +817,7 @@ class VKPlayLiveIE(VKPlayBaseIE):
'uploader_id': '12279401',
'release_timestamp': 1687209962,
'release_date': '20230619',
- 'thumbnail': r're:https://images.vkplay.live/public_video_stream/12279401/preview\?change_time=\d+',
+ 'thumbnail': r're:https://[^/]+/public_video_stream/12279401/preview',
'view_count': int,
'concurrent_view_count': int,
'like_count': int,
@@ -822,6 +826,9 @@ class VKPlayLiveIE(VKPlayBaseIE):
},
'skip': 'livestream',
'params': {'skip_download': True},
+ }, {
+ 'url': 'https://live.vkplay.ru/lebwa',
+ 'only_matching': True,
}]
def _real_extract(self, url):
diff --git a/yt_dlp/extractor/vrt.py b/yt_dlp/extractor/vrt.py
index 497233d..3d26549 100644
--- a/yt_dlp/extractor/vrt.py
+++ b/yt_dlp/extractor/vrt.py
@@ -16,6 +16,7 @@ from ..utils import (
join_nonempty,
jwt_encode_hs256,
make_archive_id,
+ merge_dicts,
parse_age_limit,
parse_iso8601,
str_or_none,
@@ -425,3 +426,64 @@ class DagelijkseKostIE(VRTBaseIE):
['description', 'twitter:description', 'og:description'], webpage),
'_old_archive_ids': [make_archive_id('Canvas', video_id)],
}
+
+
+class Radio1BeIE(VRTBaseIE):
+ _VALID_URL = r'https?://radio1\.be/(?:lees|luister/select)/(?P<id>[\w/-]+)'
+ _TESTS = [{
+ 'url': 'https://radio1.be/luister/select/de-ochtend/komt-n-va-volgend-jaar-op-in-wallonie',
+ 'info_dict': {
+ 'id': 'eb6c22e9-544f-44f4-af39-cf8cccd29e22',
+ 'title': 'Komt N-VA volgend jaar op in Wallonië?',
+ 'display_id': 'de-ochtend/komt-n-va-volgend-jaar-op-in-wallonie',
+ 'description': 'md5:b374ea1c9302f38362df9dea1931468e',
+ 'thumbnail': r're:https?://cds\.vrt\.radio/[^/#\?&]+'
+ },
+ 'playlist_mincount': 1
+ }, {
+ 'url': 'https://radio1.be/lees/europese-unie-wil-onmiddellijke-humanitaire-pauze-en-duurzaam-staakt-het-vuren-in-gaza?view=web',
+ 'info_dict': {
+ 'id': '5d47f102-dbdb-4fa0-832b-26c1870311f2',
+ 'title': 'Europese Unie wil "onmiddellijke humanitaire pauze" en "duurzaam staakt-het-vuren" in Gaza',
+ 'description': 'md5:1aad1fae7d39edeffde5d3e67d276b64',
+ 'thumbnail': r're:https?://cds\.vrt\.radio/[^/#\?&]+',
+ 'display_id': 'europese-unie-wil-onmiddellijke-humanitaire-pauze-en-duurzaam-staakt-het-vuren-in-gaza'
+ },
+ 'playlist_mincount': 1
+ }]
+
+ def _extract_video_entries(self, next_js_data, display_id):
+ video_data = traverse_obj(
+ next_js_data, ((None, ('paragraphs', ...)), {lambda x: x if x['mediaReference'] else None}))
+ for data in video_data:
+ media_reference = data['mediaReference']
+ formats, subtitles = self._extract_formats_and_subtitles(
+ self._call_api(media_reference), display_id)
+
+ yield {
+ 'id': media_reference,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ **traverse_obj(data, {
+ 'title': ('title', {str}),
+ 'description': ('body', {clean_html})
+ }),
+ }
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+ next_js_data = self._search_nextjs_data(webpage, display_id)['props']['pageProps']['item']
+
+ return self.playlist_result(
+ self._extract_video_entries(next_js_data, display_id), **merge_dicts(traverse_obj(
+ next_js_data, ({
+ 'id': ('id', {str}),
+ 'title': ('title', {str}),
+ 'description': (('description', 'content'), {clean_html}),
+ }), get_all=False), {
+ 'display_id': display_id,
+ 'title': self._html_search_meta(['name', 'og:title', 'twitter:title'], webpage),
+ 'description': self._html_search_meta(['description', 'og:description', 'twitter:description'], webpage),
+ 'thumbnail': self._html_search_meta(['og:image', 'twitter:image'], webpage),
+ }))
diff --git a/yt_dlp/extractor/wistia.py b/yt_dlp/extractor/wistia.py
index bce5e83..f2256fd 100644
--- a/yt_dlp/extractor/wistia.py
+++ b/yt_dlp/extractor/wistia.py
@@ -1,6 +1,6 @@
+import base64
import re
import urllib.parse
-from base64 import b64decode
from .common import InfoExtractor
from ..networking import HEADRequest
@@ -371,7 +371,7 @@ class WistiaChannelIE(WistiaBaseIE):
webpage = self._download_webpage(f'https://fast.wistia.net/embed/channel/{channel_id}', channel_id)
data = self._parse_json(
self._search_regex(r'wchanneljsonp-%s\'\]\s*=[^\"]*\"([A-Za-z0-9=/]*)' % channel_id, webpage, 'jsonp', channel_id),
- channel_id, transform_source=lambda x: urllib.parse.unquote_plus(b64decode(x).decode('utf-8')))
+ channel_id, transform_source=lambda x: urllib.parse.unquote_plus(base64.b64decode(x).decode('utf-8')))
# XXX: can there be more than one series?
series = traverse_obj(data, ('series', 0), default={})
diff --git a/yt_dlp/extractor/xvideos.py b/yt_dlp/extractor/xvideos.py
index 5df0715..59eef84 100644
--- a/yt_dlp/extractor/xvideos.py
+++ b/yt_dlp/extractor/xvideos.py
@@ -15,35 +15,35 @@ class XVideosIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:
- (?:[^/]+\.)?xvideos2?\.com/video|
- (?:www\.)?xvideos\.es/video|
+ (?:[^/]+\.)?xvideos2?\.com/video\.?|
+ (?:www\.)?xvideos\.es/video\.?|
(?:www|flashservice)\.xvideos\.com/embedframe/|
static-hw\.xvideos\.com/swf/xv-player\.swf\?.*?\bid_video=
)
- (?P<id>[0-9]+)
+ (?P<id>[0-9a-z]+)
'''
_TESTS = [{
- 'url': 'https://www.xvideos.com/video4588838/motorcycle_guy_cucks_influencer_steals_his_gf',
- 'md5': '14cea69fcb84db54293b1e971466c2e1',
+ 'url': 'http://xvideos.com/video.ucuvbkfda4e/a_beautiful_red-haired_stranger_was_refused_but_still_came_to_my_room_for_sex',
+ 'md5': '396255a900a6bddb3e98985f0b86c3fd',
'info_dict': {
- 'id': '4588838',
+ 'id': 'ucuvbkfda4e',
'ext': 'mp4',
- 'title': 'Motorcycle Guy Cucks Influencer, Steals his GF',
- 'duration': 108,
+ 'title': 'A Beautiful Red-Haired Stranger Was Refused, But Still Came To My Room For Sex',
+ 'duration': 1238,
'age_limit': 18,
- 'thumbnail': r're:^https://img-hw.xvideos-cdn.com/.+\.jpg',
+ 'thumbnail': r're:^https://cdn\d+-pic.xvideos-cdn.com/.+\.jpg',
}
}, {
# Broken HLS formats
'url': 'https://www.xvideos.com/video65982001/what_s_her_name',
- 'md5': 'b82d7d7ef7d65a84b1fa6965f81f95a5',
+ 'md5': '56742808292c8fa1418e4538c262c58b',
'info_dict': {
'id': '65982001',
'ext': 'mp4',
'title': 'what\'s her name?',
'duration': 120,
'age_limit': 18,
- 'thumbnail': r're:^https://img-hw.xvideos-cdn.com/.+\.jpg',
+ 'thumbnail': r're:^https://cdn\d+-pic.xvideos-cdn.com/.+\.jpg',
}
}, {
'url': 'https://flashservice.xvideos.com/embedframe/4588838',
@@ -90,6 +90,18 @@ class XVideosIE(InfoExtractor):
}, {
'url': 'https://de.xvideos.com/video4588838/biker_takes_his_girl',
'only_matching': True
+ }, {
+ 'url': 'https://flashservice.xvideos.com/embedframe/ucuvbkfda4e',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://www.xvideos.com/embedframe/ucuvbkfda4e',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://static-hw.xvideos.com/swf/xv-player.swf?id_video=ucuvbkfda4e',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://xvideos.es/video.ucuvbkfda4e/a_beautiful_red-haired_stranger_was_refused_but_still_came_to_my_room_for_sex',
+ 'only_matching': True
}]
def _real_extract(self, url):
diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index 33fd3b4..e553fff 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -2,7 +2,7 @@ import base64
import calendar
import collections
import copy
-import datetime
+import datetime as dt
import enum
import hashlib
import itertools
@@ -33,6 +33,7 @@ from ..utils import (
clean_html,
datetime_from_str,
dict_get,
+ filesize_from_tbr,
filter_dict,
float_or_none,
format_field,
@@ -55,6 +56,7 @@ from ..utils import (
str_to_int,
strftime_or_none,
traverse_obj,
+ try_call,
try_get,
unescapeHTML,
unified_strdate,
@@ -922,10 +924,10 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
def _parse_time_text(self, text):
if not text:
return
- dt = self.extract_relative_time(text)
+ dt_ = self.extract_relative_time(text)
timestamp = None
- if isinstance(dt, datetime.datetime):
- timestamp = calendar.timegm(dt.timetuple())
+ if isinstance(dt_, dt.datetime):
+ timestamp = calendar.timegm(dt_.timetuple())
if timestamp is None:
timestamp = (
@@ -3602,8 +3604,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
yt_query = {
'videoId': video_id,
}
- if _split_innertube_client(client)[0] == 'android':
- yt_query['params'] = 'CgIQBg=='
+ if _split_innertube_client(client)[0] in ('android', 'android_embedscreen'):
+ yt_query['params'] = 'CgIIAQ=='
pp_arg = self._configuration_arg('player_params', [None], casesense=True)[0]
if pp_arg:
@@ -3839,11 +3841,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
10 if audio_track.get('audioIsDefault') and 10
else -10 if 'descriptive' in (audio_track.get('displayName') or '').lower() and -10
else -1)
+ format_duration = traverse_obj(fmt, ('approxDurationMs', {lambda x: float_or_none(x, 1000)}))
# Some formats may have much smaller duration than others (possibly damaged during encoding)
# E.g. 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
# Make sure to avoid false positives with small duration differences.
# E.g. __2ABJjxzNo, ySuUZEjARPY
- is_damaged = try_get(fmt, lambda x: float(x['approxDurationMs']) / duration < 500)
+ is_damaged = try_call(lambda: format_duration < duration // 2)
if is_damaged:
self.report_warning(
f'{video_id}: Some formats are possibly damaged. They will be deprioritized', only_once=True)
@@ -3873,6 +3876,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'quality': q(quality) - bool(fmt.get('isDrc')) / 2,
'has_drm': bool(fmt.get('drmFamilies')),
'tbr': tbr,
+ 'filesize_approx': filesize_from_tbr(tbr, format_duration),
'url': fmt_url,
'width': int_or_none(fmt.get('width')),
'language': join_nonempty(audio_track.get('id', '').split('.')[0],
@@ -4564,7 +4568,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if upload_date and live_status not in ('is_live', 'post_live', 'is_upcoming'):
# Newly uploaded videos' HLS formats are potentially problematic and need to be checked
- upload_datetime = datetime_from_str(upload_date).replace(tzinfo=datetime.timezone.utc)
+ upload_datetime = datetime_from_str(upload_date).replace(tzinfo=dt.timezone.utc)
if upload_datetime >= datetime_from_str('today-2days'):
for fmt in info['formats']:
if fmt.get('protocol') == 'm3u8_native':
@@ -6965,7 +6969,7 @@ class YoutubeSearchIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
IE_DESC = 'YouTube search'
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
- _SEARCH_PARAMS = 'EgIQAQ%3D%3D' # Videos only
+ _SEARCH_PARAMS = 'EgIQAfABAQ==' # Videos only
_TESTS = [{
'url': 'ytsearch5:youtube-dl test video',
'playlist_count': 5,
@@ -6973,6 +6977,14 @@ class YoutubeSearchIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
+ }, {
+ 'note': 'Suicide/self-harm search warning',
+ 'url': 'ytsearch1:i hate myself and i wanna die',
+ 'playlist_count': 1,
+ 'info_dict': {
+ 'id': 'i hate myself and i wanna die',
+ 'title': 'i hate myself and i wanna die',
+ }
}]
@@ -6980,7 +6992,7 @@ class YoutubeSearchDateIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube search, newest videos first'
- _SEARCH_PARAMS = 'CAISAhAB' # Videos only, sorted by date
+ _SEARCH_PARAMS = 'CAISAhAB8AEB' # Videos only, sorted by date
_TESTS = [{
'url': 'ytsearchdate5:youtube-dl test video',
'playlist_count': 5,
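The `filesize_from_tbr(tbr, format_duration)` call added in the formats hunk above estimates size from total bitrate and duration; the arithmetic is simply kilobits per second times seconds divided by eight. A hedged sketch of the same estimate (the real helper lives in yt_dlp.utils and may handle edge cases differently):

def estimate_filesize(tbr_kbps, duration_s):
    # tbr is in kilobits per second; result is an approximate size in bytes
    if tbr_kbps is None or duration_s is None:
        return None
    return int(tbr_kbps * duration_s * 1000 / 8)

# e.g. a 128 kbps audio-only format lasting 300 s is roughly 4.8 MB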
diff --git a/yt_dlp/extractor/zattoo.py b/yt_dlp/extractor/zattoo.py
index 6bd9ea0..5cc9c5f 100644
--- a/yt_dlp/extractor/zattoo.py
+++ b/yt_dlp/extractor/zattoo.py
@@ -1,5 +1,5 @@
import re
-from uuid import uuid4
+import uuid
from .common import InfoExtractor
from ..compat import compat_str
@@ -53,7 +53,7 @@ class ZattooPlatformBaseIE(InfoExtractor):
self._request_webpage(
'%s/zapi/v3/session/hello' % self._host_url(), None,
'Opening session', data=urlencode_postdata({
- 'uuid': compat_str(uuid4()),
+ 'uuid': compat_str(uuid.uuid4()),
'lang': 'en',
'app_version': '1.8.2',
'format': 'json',
diff --git a/yt_dlp/networking/__init__.py b/yt_dlp/networking/__init__.py
index acadc01..356712c 100644
--- a/yt_dlp/networking/__init__.py
+++ b/yt_dlp/networking/__init__.py
@@ -28,3 +28,10 @@ except ImportError:
pass
except Exception as e:
warnings.warn(f'Failed to import "websockets" request handler: {e}' + bug_reports_message())
+
+try:
+ from . import _curlcffi # noqa: F401
+except ImportError:
+ pass
+except Exception as e:
+ warnings.warn(f'Failed to import "curl_cffi" request handler: {e}' + bug_reports_message())
diff --git a/yt_dlp/networking/_curlcffi.py b/yt_dlp/networking/_curlcffi.py
new file mode 100644
index 0000000..39d1f70
--- /dev/null
+++ b/yt_dlp/networking/_curlcffi.py
@@ -0,0 +1,221 @@
+from __future__ import annotations
+
+import io
+import math
+import urllib.parse
+
+from ._helper import InstanceStoreMixin, select_proxy
+from .common import (
+ Features,
+ Request,
+ Response,
+ register_preference,
+ register_rh,
+)
+from .exceptions import (
+ CertificateVerifyError,
+ HTTPError,
+ IncompleteRead,
+ ProxyError,
+ SSLError,
+ TransportError,
+)
+from .impersonate import ImpersonateRequestHandler, ImpersonateTarget
+from ..dependencies import curl_cffi
+from ..utils import int_or_none
+
+if curl_cffi is None:
+ raise ImportError('curl_cffi is not installed')
+
+curl_cffi_version = tuple(int_or_none(x, default=0) for x in curl_cffi.__version__.split('.'))
+
+if curl_cffi_version != (0, 5, 10):
+ curl_cffi._yt_dlp__version = f'{curl_cffi.__version__} (unsupported)'
+ raise ImportError('Only curl_cffi 0.5.10 is supported')
+
+import curl_cffi.requests
+from curl_cffi.const import CurlECode, CurlOpt
+
+
+class CurlCFFIResponseReader(io.IOBase):
+ def __init__(self, response: curl_cffi.requests.Response):
+ self._response = response
+ self._iterator = response.iter_content()
+ self._buffer = b''
+ self.bytes_read = 0
+
+ def readable(self):
+ return True
+
+ def read(self, size=None):
+ exception_raised = True
+ try:
+ while self._iterator and (size is None or len(self._buffer) < size):
+ chunk = next(self._iterator, None)
+ if chunk is None:
+ self._iterator = None
+ break
+ self._buffer += chunk
+ self.bytes_read += len(chunk)
+
+ if size is None:
+ size = len(self._buffer)
+ data = self._buffer[:size]
+ self._buffer = self._buffer[size:]
+
+ # "free" the curl instance if the response is fully read.
+ # curl_cffi doesn't do this automatically and only allows one open response per thread
+ if not self._iterator and not self._buffer:
+ self.close()
+ exception_raised = False
+ return data
+ finally:
+ if exception_raised:
+ self.close()
+
+ def close(self):
+ if not self.closed:
+ self._response.close()
+ self._buffer = b''
+ super().close()
+
+
+class CurlCFFIResponseAdapter(Response):
+ fp: CurlCFFIResponseReader
+
+ def __init__(self, response: curl_cffi.requests.Response):
+ super().__init__(
+ fp=CurlCFFIResponseReader(response),
+ headers=response.headers,
+ url=response.url,
+ status=response.status_code)
+
+ def read(self, amt=None):
+ try:
+ return self.fp.read(amt)
+ except curl_cffi.requests.errors.RequestsError as e:
+ if e.code == CurlECode.PARTIAL_FILE:
+ content_length = int_or_none(e.response.headers.get('Content-Length'))
+ raise IncompleteRead(
+ partial=self.fp.bytes_read,
+ expected=content_length - self.fp.bytes_read if content_length is not None else None,
+ cause=e) from e
+ raise TransportError(cause=e) from e
+
+
+@register_rh
+class CurlCFFIRH(ImpersonateRequestHandler, InstanceStoreMixin):
+ RH_NAME = 'curl_cffi'
+ _SUPPORTED_URL_SCHEMES = ('http', 'https')
+ _SUPPORTED_FEATURES = (Features.NO_PROXY, Features.ALL_PROXY)
+ _SUPPORTED_PROXY_SCHEMES = ('http', 'https', 'socks4', 'socks4a', 'socks5', 'socks5h')
+ _SUPPORTED_IMPERSONATE_TARGET_MAP = {
+ ImpersonateTarget('chrome', '110', 'windows', '10'): curl_cffi.requests.BrowserType.chrome110,
+ ImpersonateTarget('chrome', '107', 'windows', '10'): curl_cffi.requests.BrowserType.chrome107,
+ ImpersonateTarget('chrome', '104', 'windows', '10'): curl_cffi.requests.BrowserType.chrome104,
+ ImpersonateTarget('chrome', '101', 'windows', '10'): curl_cffi.requests.BrowserType.chrome101,
+ ImpersonateTarget('chrome', '100', 'windows', '10'): curl_cffi.requests.BrowserType.chrome100,
+ ImpersonateTarget('chrome', '99', 'windows', '10'): curl_cffi.requests.BrowserType.chrome99,
+ ImpersonateTarget('edge', '101', 'windows', '10'): curl_cffi.requests.BrowserType.edge101,
+ ImpersonateTarget('edge', '99', 'windows', '10'): curl_cffi.requests.BrowserType.edge99,
+ ImpersonateTarget('safari', '15.5', 'macos', '12'): curl_cffi.requests.BrowserType.safari15_5,
+ ImpersonateTarget('safari', '15.3', 'macos', '11'): curl_cffi.requests.BrowserType.safari15_3,
+ ImpersonateTarget('chrome', '99', 'android', '12'): curl_cffi.requests.BrowserType.chrome99_android,
+ }
+
+ def _create_instance(self, cookiejar=None):
+ return curl_cffi.requests.Session(cookies=cookiejar)
+
+ def _check_extensions(self, extensions):
+ super()._check_extensions(extensions)
+ extensions.pop('impersonate', None)
+ extensions.pop('cookiejar', None)
+ extensions.pop('timeout', None)
+
+ def _send(self, request: Request):
+ max_redirects_exceeded = False
+ session: curl_cffi.requests.Session = self._get_instance(
+ cookiejar=self._get_cookiejar(request) if 'cookie' not in request.headers else None)
+
+ if self.verbose:
+ session.curl.setopt(CurlOpt.VERBOSE, 1)
+
+ proxies = self._get_proxies(request)
+ if 'no' in proxies:
+ session.curl.setopt(CurlOpt.NOPROXY, proxies['no'])
+ proxies.pop('no', None)
+
+ # curl doesn't support per protocol proxies, so we select the one that matches the request protocol
+ proxy = select_proxy(request.url, proxies=proxies)
+ if proxy:
+ session.curl.setopt(CurlOpt.PROXY, proxy)
+ scheme = urllib.parse.urlparse(request.url).scheme.lower()
+ if scheme != 'http':
+ # Enable HTTP CONNECT for HTTPS urls.
+ # Don't use CONNECT for http for compatibility with urllib behaviour.
+ # See: https://curl.se/libcurl/c/CURLOPT_HTTPPROXYTUNNEL.html
+ session.curl.setopt(CurlOpt.HTTPPROXYTUNNEL, 1)
+
+ headers = self._get_impersonate_headers(request)
+
+ if self._client_cert:
+ session.curl.setopt(CurlOpt.SSLCERT, self._client_cert['client_certificate'])
+ client_certificate_key = self._client_cert.get('client_certificate_key')
+ client_certificate_password = self._client_cert.get('client_certificate_password')
+ if client_certificate_key:
+ session.curl.setopt(CurlOpt.SSLKEY, client_certificate_key)
+ if client_certificate_password:
+ session.curl.setopt(CurlOpt.KEYPASSWD, client_certificate_password)
+
+ timeout = self._calculate_timeout(request)
+
+ # set CURLOPT_LOW_SPEED_LIMIT and CURLOPT_LOW_SPEED_TIME to act as a read timeout. [1]
+ # curl_cffi does not currently do this. [2]
+ # Note: CURLOPT_LOW_SPEED_TIME is in seconds, so we need to round up to the nearest second. [3]
+ # [1] https://unix.stackexchange.com/a/305311
+ # [2] https://github.com/yifeikong/curl_cffi/issues/156
+ # [3] https://curl.se/libcurl/c/CURLOPT_LOW_SPEED_TIME.html
+ session.curl.setopt(CurlOpt.LOW_SPEED_LIMIT, 1) # 1 byte per second
+ session.curl.setopt(CurlOpt.LOW_SPEED_TIME, math.ceil(timeout))
+
+ try:
+ curl_response = session.request(
+ method=request.method,
+ url=request.url,
+ headers=headers,
+ data=request.data,
+ verify=self.verify,
+ max_redirects=5,
+ timeout=timeout,
+ impersonate=self._SUPPORTED_IMPERSONATE_TARGET_MAP.get(
+ self._get_request_target(request)),
+ interface=self.source_address,
+ stream=True
+ )
+ except curl_cffi.requests.errors.RequestsError as e:
+ if e.code == CurlECode.PEER_FAILED_VERIFICATION:
+ raise CertificateVerifyError(cause=e) from e
+
+ elif e.code == CurlECode.SSL_CONNECT_ERROR:
+ raise SSLError(cause=e) from e
+
+ elif e.code == CurlECode.TOO_MANY_REDIRECTS:
+ max_redirects_exceeded = True
+ curl_response = e.response
+
+ elif e.code == CurlECode.PROXY:
+ raise ProxyError(cause=e) from e
+ else:
+ raise TransportError(cause=e) from e
+
+ response = CurlCFFIResponseAdapter(curl_response)
+
+ if not 200 <= response.status < 300:
+ raise HTTPError(response, redirect_loop=max_redirects_exceeded)
+
+ return response
+
+
+@register_preference(CurlCFFIRH)
+def curl_cffi_preference(rh, request):
+ return -100
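On the LOW_SPEED options set in `_send` above: libcurl has no direct per-read timeout, so the handler aborts the transfer whenever fewer than LOW_SPEED_LIMIT bytes per second arrive for LOW_SPEED_TIME whole seconds, which approximates one. A minimal sketch of that mapping (illustrative only; the handler applies it via `session.curl.setopt`):

import math

def read_timeout_opts(timeout):
    # LOW_SPEED_TIME only accepts whole seconds, so round the timeout up
    return {
        'LOW_SPEED_LIMIT': 1,                  # fewer than 1 byte/s ...
        'LOW_SPEED_TIME': math.ceil(timeout),  # ... for this many seconds aborts the transfer
    }

# read_timeout_opts(20.0) -> {'LOW_SPEED_LIMIT': 1, 'LOW_SPEED_TIME': 20}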
diff --git a/yt_dlp/networking/_helper.py b/yt_dlp/networking/_helper.py
index d79dd79..8e678b2 100644
--- a/yt_dlp/networking/_helper.py
+++ b/yt_dlp/networking/_helper.py
@@ -2,6 +2,7 @@ from __future__ import annotations
import contextlib
import functools
+import os
import socket
import ssl
import sys
@@ -121,6 +122,9 @@ def make_ssl_context(
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = verify
context.verify_mode = ssl.CERT_REQUIRED if verify else ssl.CERT_NONE
+ # OpenSSL 1.1.1+ Python 3.8+ keylog file
+ if hasattr(context, 'keylog_filename'):
+ context.keylog_filename = os.environ.get('SSLKEYLOGFILE') or None
# Some servers may reject requests if ALPN extension is not sent. See:
# https://github.com/python/cpython/issues/85140
diff --git a/yt_dlp/networking/_requests.py b/yt_dlp/networking/_requests.py
index 6545028..e3edc77 100644
--- a/yt_dlp/networking/_requests.py
+++ b/yt_dlp/networking/_requests.py
@@ -307,8 +307,7 @@ class RequestsRH(RequestHandler, InstanceStoreMixin):
max_redirects_exceeded = False
- session = self._get_instance(
- cookiejar=request.extensions.get('cookiejar') or self.cookiejar)
+ session = self._get_instance(cookiejar=self._get_cookiejar(request))
try:
requests_res = session.request(
@@ -316,8 +315,8 @@ class RequestsRH(RequestHandler, InstanceStoreMixin):
url=request.url,
data=request.data,
headers=headers,
- timeout=float(request.extensions.get('timeout') or self.timeout),
- proxies=request.proxies or self.proxies,
+ timeout=self._calculate_timeout(request),
+ proxies=self._get_proxies(request),
allow_redirects=True,
stream=True
)
diff --git a/yt_dlp/networking/_urllib.py b/yt_dlp/networking/_urllib.py
index cb4dae3..ff110dc 100644
--- a/yt_dlp/networking/_urllib.py
+++ b/yt_dlp/networking/_urllib.py
@@ -389,11 +389,11 @@ class UrllibRH(RequestHandler, InstanceStoreMixin):
)
opener = self._get_instance(
- proxies=request.proxies or self.proxies,
- cookiejar=request.extensions.get('cookiejar') or self.cookiejar
+ proxies=self._get_proxies(request),
+ cookiejar=self._get_cookiejar(request)
)
try:
- res = opener.open(urllib_req, timeout=float(request.extensions.get('timeout') or self.timeout))
+ res = opener.open(urllib_req, timeout=self._calculate_timeout(request))
except urllib.error.HTTPError as e:
if isinstance(e.fp, (http.client.HTTPResponse, urllib.response.addinfourl)):
# Prevent file object from being closed when urllib.error.HTTPError is destroyed.
diff --git a/yt_dlp/networking/_websockets.py b/yt_dlp/networking/_websockets.py
index 1597932..6e235b0 100644
--- a/yt_dlp/networking/_websockets.py
+++ b/yt_dlp/networking/_websockets.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import contextlib
import io
import logging
import ssl
@@ -38,27 +39,40 @@ if websockets_version < (12, 0):
import websockets.sync.client
from websockets.uri import parse_uri
+# In websockets Connection, recv_exc and recv_events_exc are defined
+# after the recv events handler thread is started [1].
+# On our CI using PyPy, in some cases a race condition may occur
+# where the recv events handler thread tries to use these attributes before they are defined [2].
+# 1: https://github.com/python-websockets/websockets/blame/de768cf65e7e2b1a3b67854fb9e08816a5ff7050/src/websockets/sync/connection.py#L93
+# 2: "AttributeError: 'ClientConnection' object has no attribute 'recv_events_exc'. Did you mean: 'recv_events'?"
+import websockets.sync.connection # isort: split
+with contextlib.suppress(Exception):
+ # > 12.0
+ websockets.sync.connection.Connection.recv_exc = None
+ # 12.0
+ websockets.sync.connection.Connection.recv_events_exc = None
+
class WebsocketsResponseAdapter(WebSocketResponse):
- def __init__(self, wsw: websockets.sync.client.ClientConnection, url):
+ def __init__(self, ws: websockets.sync.client.ClientConnection, url):
super().__init__(
- fp=io.BytesIO(wsw.response.body or b''),
+ fp=io.BytesIO(ws.response.body or b''),
url=url,
- headers=wsw.response.headers,
- status=wsw.response.status_code,
- reason=wsw.response.reason_phrase,
+ headers=ws.response.headers,
+ status=ws.response.status_code,
+ reason=ws.response.reason_phrase,
)
- self.wsw = wsw
+ self._ws = ws
def close(self):
- self.wsw.close()
+ self._ws.close()
super().close()
def send(self, message):
# https://websockets.readthedocs.io/en/stable/reference/sync/client.html#websockets.sync.client.ClientConnection.send
try:
- return self.wsw.send(message)
+ return self._ws.send(message)
except (websockets.exceptions.WebSocketException, RuntimeError, TimeoutError) as e:
raise TransportError(cause=e) from e
except SocksProxyError as e:
@@ -69,7 +83,7 @@ class WebsocketsResponseAdapter(WebSocketResponse):
def recv(self):
# https://websockets.readthedocs.io/en/stable/reference/sync/client.html#websockets.sync.client.ClientConnection.recv
try:
- return self.wsw.recv()
+ return self._ws.recv()
except SocksProxyError as e:
raise ProxyError(cause=e) from e
except (websockets.exceptions.WebSocketException, RuntimeError, TimeoutError) as e:
@@ -112,10 +126,10 @@ class WebsocketsRH(WebSocketRequestHandler):
logging.getLogger(name).removeHandler(handler)
def _send(self, request):
- timeout = float(request.extensions.get('timeout') or self.timeout)
+ timeout = self._calculate_timeout(request)
headers = self._merge_headers(request.headers)
if 'cookie' not in headers:
- cookiejar = request.extensions.get('cookiejar') or self.cookiejar
+ cookiejar = self._get_cookiejar(request)
cookie_header = cookiejar.get_cookie_header(request.url)
if cookie_header:
headers['cookie'] = cookie_header
@@ -125,7 +139,7 @@ class WebsocketsRH(WebSocketRequestHandler):
'source_address': (self.source_address, 0) if self.source_address else None,
'timeout': timeout
}
- proxy = select_proxy(request.url, request.proxies or self.proxies or {})
+ proxy = select_proxy(request.url, self._get_proxies(request))
try:
if proxy:
socks_proxy_options = make_socks_proxy_opts(proxy)
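The attribute pre-seeding above works because instance attribute lookup falls back to the class: the recv-events thread always finds a value even if it runs before the per-instance assignment. A generic illustration of that fallback (not the websockets API):

    class C:
        pass

    C.attr = None           # seeded on the class, like recv_exc/recv_events_exc above
    obj = C.__new__(C)      # instance exists, but no instance attribute has been set yet
    print(obj.attr)         # None -- lookup falls back to the class attribute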
diff --git a/yt_dlp/networking/common.py b/yt_dlp/networking/common.py
index 39442ba..4c66ba6 100644
--- a/yt_dlp/networking/common.py
+++ b/yt_dlp/networking/common.py
@@ -256,6 +256,15 @@ class RequestHandler(abc.ABC):
def _merge_headers(self, request_headers):
return HTTPHeaderDict(self.headers, request_headers)
+ def _calculate_timeout(self, request):
+ return float(request.extensions.get('timeout') or self.timeout)
+
+ def _get_cookiejar(self, request):
+ return request.extensions.get('cookiejar') or self.cookiejar
+
+ def _get_proxies(self, request):
+ return (request.proxies or self.proxies).copy()
+
def _check_url_scheme(self, request: Request):
scheme = urllib.parse.urlparse(request.url).scheme.lower()
if self._SUPPORTED_URL_SCHEMES is not None and scheme not in self._SUPPORTED_URL_SCHEMES:
@@ -454,9 +463,10 @@ class Request:
else:
raise TypeError('headers must be a mapping')
- def update(self, url=None, data=None, headers=None, query=None):
+ def update(self, url=None, data=None, headers=None, query=None, extensions=None):
self.data = data if data is not None else self.data
self.headers.update(headers or {})
+ self.extensions.update(extensions or {})
self.url = update_url_query(url or self.url, query or {})
def copy(self):
@@ -491,7 +501,7 @@ class Response(io.IOBase):
def __init__(
self,
- fp: typing.IO,
+ fp: io.IOBase,
url: str,
headers: Mapping[str, str],
status: int = 200,
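With extensions now accepted by Request.update, per-request settings can be adjusted after construction. A hedged usage sketch (the cookiejar and URL are illustrative):

    from http.cookiejar import CookieJar
    from yt_dlp.networking.common import Request

    req = Request('https://example.com/api', extensions={'timeout': 5})
    req.update(query={'page': '2'}, extensions={'cookiejar': CookieJar()})
    # headers and extensions are merged into the existing mappings rather than replaced,
    # and the URL is rebuilt with the extra query parameters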
diff --git a/yt_dlp/networking/impersonate.py b/yt_dlp/networking/impersonate.py
new file mode 100644
index 0000000..ca66180
--- /dev/null
+++ b/yt_dlp/networking/impersonate.py
@@ -0,0 +1,141 @@
+from __future__ import annotations
+
+import re
+from abc import ABC
+from dataclasses import dataclass
+from typing import Any
+
+from .common import RequestHandler, register_preference
+from .exceptions import UnsupportedRequest
+from ..compat.types import NoneType
+from ..utils import classproperty, join_nonempty
+from ..utils.networking import std_headers
+
+
+@dataclass(order=True, frozen=True)
+class ImpersonateTarget:
+ """
+ A target for browser impersonation.
+
+ Parameters:
+ @param client: the client to impersonate
+ @param version: the client version to impersonate
+ @param os: the client OS to impersonate
+ @param os_version: the client OS version to impersonate
+
+ Note: None is used to indicate that any value will match.
+
+ """
+ client: str | None = None
+ version: str | None = None
+ os: str | None = None
+ os_version: str | None = None
+
+ def __post_init__(self):
+ if self.version and not self.client:
+ raise ValueError('client is required if version is set')
+ if self.os_version and not self.os:
+ raise ValueError('os is required if os_version is set')
+
+ def __contains__(self, target: ImpersonateTarget):
+ if not isinstance(target, ImpersonateTarget):
+ return False
+ return (
+ (self.client is None or target.client is None or self.client == target.client)
+ and (self.version is None or target.version is None or self.version == target.version)
+ and (self.os is None or target.os is None or self.os == target.os)
+ and (self.os_version is None or target.os_version is None or self.os_version == target.os_version)
+ )
+
+ def __str__(self):
+ return f'{join_nonempty(self.client, self.version)}:{join_nonempty(self.os, self.os_version)}'.rstrip(':')
+
+ @classmethod
+ def from_str(cls, target: str):
+ mobj = re.fullmatch(r'(?:(?P<client>[^:-]+)(?:-(?P<version>[^:-]+))?)?(?::(?:(?P<os>[^:-]+)(?:-(?P<os_version>[^:-]+))?)?)?', target)
+ if not mobj:
+ raise ValueError(f'Invalid impersonate target "{target}"')
+ return cls(**mobj.groupdict())
+
+
+class ImpersonateRequestHandler(RequestHandler, ABC):
+ """
+ Base class for request handlers that support browser impersonation.
+
+ This provides a method for checking the validity of the impersonate extension,
+ which can be used in _check_extensions.
+
+ Impersonate targets consist of a client, version, os and os_version.
+ See the ImpersonateTarget class for more details.
+
+ The following may be defined:
+ - `_SUPPORTED_IMPERSONATE_TARGET_MAP`: a dict mapping supported targets to a custom object.
+ Any Request with an impersonate target not in this list will raise an UnsupportedRequest.
+ Set to None to disable this check.
+ Note: Entries are in order of preference
+
+ Parameters:
+ @param impersonate: the default impersonate target to use for requests.
+ Set to None to disable impersonation.
+ """
+ _SUPPORTED_IMPERSONATE_TARGET_MAP: dict[ImpersonateTarget, Any] = {}
+
+ def __init__(self, *, impersonate: ImpersonateTarget = None, **kwargs):
+ super().__init__(**kwargs)
+ self.impersonate = impersonate
+
+ def _check_impersonate_target(self, target: ImpersonateTarget):
+ assert isinstance(target, (ImpersonateTarget, NoneType))
+ if target is None or not self.supported_targets:
+ return
+ if not self.is_supported_target(target):
+ raise UnsupportedRequest(f'Unsupported impersonate target: {target}')
+
+ def _check_extensions(self, extensions):
+ super()._check_extensions(extensions)
+ if 'impersonate' in extensions:
+ self._check_impersonate_target(extensions.get('impersonate'))
+
+ def _validate(self, request):
+ super()._validate(request)
+ self._check_impersonate_target(self.impersonate)
+
+ def _resolve_target(self, target: ImpersonateTarget | None):
+ """Resolve a target to a supported target."""
+ if target is None:
+ return
+ for supported_target in self.supported_targets:
+ if target in supported_target:
+ if self.verbose:
+ self._logger.stdout(
+ f'{self.RH_NAME}: resolved impersonate target {target} to {supported_target}')
+ return supported_target
+
+ @classproperty
+ def supported_targets(self) -> tuple[ImpersonateTarget, ...]:
+ return tuple(self._SUPPORTED_IMPERSONATE_TARGET_MAP.keys())
+
+ def is_supported_target(self, target: ImpersonateTarget):
+ assert isinstance(target, ImpersonateTarget)
+ return self._resolve_target(target) is not None
+
+ def _get_request_target(self, request):
+ """Get the requested target for the request"""
+ return self._resolve_target(request.extensions.get('impersonate') or self.impersonate)
+
+ def _get_impersonate_headers(self, request):
+ headers = self._merge_headers(request.headers)
+ if self._get_request_target(request) is not None:
+ # remove all headers present in std_headers
+ # todo: change this to not depend on std_headers
+ for k, v in std_headers.items():
+ if headers.get(k) == v:
+ headers.pop(k)
+ return headers
+
+
+@register_preference(ImpersonateRequestHandler)
+def impersonate_preference(rh, request):
+ if request.extensions.get('impersonate') or rh.impersonate:
+ return 1000
+ return 0
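Based on from_str and __contains__ above, target matching is expected to behave roughly as follows (the client/OS values are illustrative):

    from yt_dlp.networking.impersonate import ImpersonateTarget

    target = ImpersonateTarget.from_str('chrome-110:windows-10')
    # ImpersonateTarget(client='chrome', version='110', os='windows', os_version='10')

    # `in` compares field by field, with None on either side acting as a wildcard
    assert target in ImpersonateTarget('chrome')
    assert ImpersonateTarget('chrome') in target
    assert target not in ImpersonateTarget('safari')

    str(target)                        # 'chrome-110:windows-10'
    str(ImpersonateTarget('chrome'))   # 'chrome'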
diff --git a/yt_dlp/options.py b/yt_dlp/options.py
index f884727..faa1ee5 100644
--- a/yt_dlp/options.py
+++ b/yt_dlp/options.py
@@ -516,6 +516,18 @@ def create_parser():
help='Client-side IP address to bind to',
)
network.add_option(
+ '--impersonate',
+ metavar='CLIENT[:OS]', dest='impersonate', default=None,
+ help=(
+ 'Client to impersonate for requests. E.g. chrome, chrome-110, chrome:windows-10. '
+ 'Pass --impersonate="" to impersonate any client.'),
+ )
+ network.add_option(
+ '--list-impersonate-targets',
+ dest='list_impersonate_targets', default=False, action='store_true',
+ help='List available clients to impersonate.',
+ )
+ network.add_option(
'-4', '--force-ipv4',
action='store_const', const='0.0.0.0', dest='source_address',
help='Make all connections via IPv4',
@@ -680,6 +692,10 @@ def create_parser():
action='store_true', dest='break_on_existing', default=False,
help='Stop the download process when encountering a file that is in the archive')
selection.add_option(
+ '--no-break-on-existing',
+ action='store_false', dest='break_on_existing',
+ help='Do not stop the download process when encountering a file that is in the archive (default)')
+ selection.add_option(
'--break-on-reject',
action='store_true', dest='break_on_reject', default=False,
help=optparse.SUPPRESS_HELP)
@@ -1243,6 +1259,10 @@ def create_parser():
# TODO: Document the fields inside "progress"
'--console-title --progress-template "download-title:%(info.id)s-%(progress.eta)s"'))
verbosity.add_option(
+ '--progress-delta',
+ metavar='SECONDS', action='store', dest='progress_delta', type=float, default=0,
+ help='Time between progress output (default: 0)')
+ verbosity.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print various debugging information')
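Together, the options added above are used from the command line roughly as follows (URL and client values are illustrative):

    # list the impersonation targets the installed request handlers can provide
    yt-dlp --list-impersonate-targets

    # impersonate a specific client/OS, or any available client with ""
    yt-dlp --impersonate chrome:windows-10 'https://example.com/video'
    yt-dlp --impersonate "" 'https://example.com/video'

    # print progress at most once every 5 seconds; keep going past archived entries
    yt-dlp --progress-delta 5 --no-break-on-existing 'https://example.com/playlist'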
diff --git a/yt_dlp/update.py b/yt_dlp/update.py
index db50cfa..f47cbc5 100644
--- a/yt_dlp/update.py
+++ b/yt_dlp/update.py
@@ -114,7 +114,7 @@ _NON_UPDATEABLE_REASONS = {
**{variant: f'Auto-update is not supported for unpackaged {name} executable; Re-download the latest release'
for variant, name in {'win32_dir': 'Windows', 'darwin_dir': 'MacOS', 'linux_dir': 'Linux'}.items()},
'source': 'You cannot update when running from source code; Use git to pull the latest changes',
- 'unknown': 'You installed yt-dlp with a package manager or setup.py; Use that to update',
+ 'unknown': 'You installed yt-dlp from a manual build or with a package manager; Use that to update',
'other': 'You are using an unofficial build of yt-dlp; Build the executable again',
}
diff --git a/yt_dlp/utils/_utils.py b/yt_dlp/utils/_utils.py
index 9efeb6a..e3e80f3 100644
--- a/yt_dlp/utils/_utils.py
+++ b/yt_dlp/utils/_utils.py
@@ -5,7 +5,7 @@ import codecs
import collections
import collections.abc
import contextlib
-import datetime
+import datetime as dt
import email.header
import email.utils
import errno
@@ -50,7 +50,6 @@ from ..compat import (
compat_expanduser,
compat_HTMLParseError,
compat_os_name,
- compat_shlex_quote,
)
from ..dependencies import xattr
@@ -836,9 +835,11 @@ class Popen(subprocess.Popen):
if shell and compat_os_name == 'nt' and kwargs.get('executable') is None:
if not isinstance(args, str):
- args = ' '.join(compat_shlex_quote(a) for a in args)
+ args = shell_quote(args, shell=True)
shell = False
- args = f'{self.__comspec()} /Q /S /D /V:OFF /C "{args}"'
+ # Set variable for `cmd.exe` newline escaping (see `utils.shell_quote`)
+ env['='] = '"^\n\n"'
+ args = f'{self.__comspec()} /Q /S /D /V:OFF /E:ON /C "{args}"'
super().__init__(args, *remaining, env=env, shell=shell, **kwargs, startupinfo=self._startupinfo)
@@ -1150,14 +1151,14 @@ def extract_timezone(date_str):
timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip())
if timezone is not None:
date_str = date_str[:-len(m.group('tz'))]
- timezone = datetime.timedelta(hours=timezone or 0)
+ timezone = dt.timedelta(hours=timezone or 0)
else:
date_str = date_str[:-len(m.group('tz'))]
if not m.group('sign'):
- timezone = datetime.timedelta()
+ timezone = dt.timedelta()
else:
sign = 1 if m.group('sign') == '+' else -1
- timezone = datetime.timedelta(
+ timezone = dt.timedelta(
hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes')))
return timezone, date_str
@@ -1176,8 +1177,8 @@ def parse_iso8601(date_str, delimiter='T', timezone=None):
with contextlib.suppress(ValueError):
date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
- dt = datetime.datetime.strptime(date_str, date_format) - timezone
- return calendar.timegm(dt.timetuple())
+ dt_ = dt.datetime.strptime(date_str, date_format) - timezone
+ return calendar.timegm(dt_.timetuple())
def date_formats(day_first=True):
@@ -1198,12 +1199,12 @@ def unified_strdate(date_str, day_first=True):
for expression in date_formats(day_first):
with contextlib.suppress(ValueError):
- upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
+ upload_date = dt.datetime.strptime(date_str, expression).strftime('%Y%m%d')
if upload_date is None:
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
with contextlib.suppress(ValueError):
- upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
+ upload_date = dt.datetime(*timetuple[:6]).strftime('%Y%m%d')
if upload_date is not None:
return str(upload_date)
@@ -1233,8 +1234,8 @@ def unified_timestamp(date_str, day_first=True):
for expression in date_formats(day_first):
with contextlib.suppress(ValueError):
- dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
- return calendar.timegm(dt.timetuple())
+ dt_ = dt.datetime.strptime(date_str, expression) - timezone + dt.timedelta(hours=pm_delta)
+ return calendar.timegm(dt_.timetuple())
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
@@ -1272,11 +1273,11 @@ def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
if precision == 'auto':
auto_precision = True
precision = 'microsecond'
- today = datetime_round(datetime.datetime.now(datetime.timezone.utc), precision)
+ today = datetime_round(dt.datetime.now(dt.timezone.utc), precision)
if date_str in ('now', 'today'):
return today
if date_str == 'yesterday':
- return today - datetime.timedelta(days=1)
+ return today - dt.timedelta(days=1)
match = re.match(
r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
date_str)
@@ -1291,13 +1292,13 @@ def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
if unit == 'week':
unit = 'day'
time *= 7
- delta = datetime.timedelta(**{unit + 's': time})
+ delta = dt.timedelta(**{unit + 's': time})
new_date = start_time + delta
if auto_precision:
return datetime_round(new_date, unit)
return new_date
- return datetime_round(datetime.datetime.strptime(date_str, format), precision)
+ return datetime_round(dt.datetime.strptime(date_str, format), precision)
def date_from_str(date_str, format='%Y%m%d', strict=False):
@@ -1312,21 +1313,21 @@ def date_from_str(date_str, format='%Y%m%d', strict=False):
return datetime_from_str(date_str, precision='microsecond', format=format).date()
-def datetime_add_months(dt, months):
+def datetime_add_months(dt_, months):
"""Increment/Decrement a datetime object by months."""
- month = dt.month + months - 1
- year = dt.year + month // 12
+ month = dt_.month + months - 1
+ year = dt_.year + month // 12
month = month % 12 + 1
- day = min(dt.day, calendar.monthrange(year, month)[1])
- return dt.replace(year, month, day)
+ day = min(dt_.day, calendar.monthrange(year, month)[1])
+ return dt_.replace(year, month, day)
-def datetime_round(dt, precision='day'):
+def datetime_round(dt_, precision='day'):
"""
Round a datetime object's time to a specific precision
"""
if precision == 'microsecond':
- return dt
+ return dt_
unit_seconds = {
'day': 86400,
@@ -1335,8 +1336,8 @@ def datetime_round(dt, precision='day'):
'second': 1,
}
roundto = lambda x, n: ((x + n / 2) // n) * n
- timestamp = roundto(calendar.timegm(dt.timetuple()), unit_seconds[precision])
- return datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
+ timestamp = roundto(calendar.timegm(dt_.timetuple()), unit_seconds[precision])
+ return dt.datetime.fromtimestamp(timestamp, dt.timezone.utc)
def hyphenate_date(date_str):
@@ -1357,11 +1358,11 @@ class DateRange:
if start is not None:
self.start = date_from_str(start, strict=True)
else:
- self.start = datetime.datetime.min.date()
+ self.start = dt.datetime.min.date()
if end is not None:
self.end = date_from_str(end, strict=True)
else:
- self.end = datetime.datetime.max.date()
+ self.end = dt.datetime.max.date()
if self.start > self.end:
raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
@@ -1372,7 +1373,7 @@ class DateRange:
def __contains__(self, date):
"""Check if the date is in the range"""
- if not isinstance(date, datetime.date):
+ if not isinstance(date, dt.date):
date = date_from_str(date)
return self.start <= date <= self.end
@@ -1637,15 +1638,38 @@ def get_filesystem_encoding():
return encoding if encoding is not None else 'utf-8'
-def shell_quote(args):
- quoted_args = []
- encoding = get_filesystem_encoding()
- for a in args:
- if isinstance(a, bytes):
- # We may get a filename encoded with 'encodeFilename'
- a = a.decode(encoding)
- quoted_args.append(compat_shlex_quote(a))
- return ' '.join(quoted_args)
+_WINDOWS_QUOTE_TRANS = str.maketrans({'"': '\\"', '\\': '\\\\'})
+_CMD_QUOTE_TRANS = str.maketrans({
+ # Keep quotes balanced by replacing them with `""` instead of `\\"`
+ '"': '""',
+ # Requires a variable `=` containing `"^\n\n"` (set in `utils.Popen`)
+ # `=` should be unique since variables containing `=` cannot be set using cmd
+ '\n': '%=%',
+ # While we are only required to escape backslashes immediately before quotes,
+ # we instead escape all of 'em anyways to be consistent
+ '\\': '\\\\',
+ # Use zero length variable replacement so `%` doesn't get expanded
+ # `cd` is always set as long as extensions are enabled (`/E:ON` in `utils.Popen`)
+ '%': '%%cd:~,%',
+})
+
+
+def shell_quote(args, *, shell=False):
+ args = list(variadic(args))
+ if any(isinstance(item, bytes) for item in args):
+ deprecation_warning('Passing bytes to utils.shell_quote is deprecated')
+ encoding = get_filesystem_encoding()
+ for index, item in enumerate(args):
+ if isinstance(item, bytes):
+ args[index] = item.decode(encoding)
+
+ if compat_os_name != 'nt':
+ return shlex.join(args)
+
+ trans = _CMD_QUOTE_TRANS if shell else _WINDOWS_QUOTE_TRANS
+ return ' '.join(
+ s if re.fullmatch(r'[\w#$*\-+./:?@\\]+', s, re.ASCII) else s.translate(trans).join('""')
+ for s in args)
def smuggle_url(url, data):
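A rough sketch of the reworked shell_quote behaviour (outputs shown for a POSIX shell; on Windows the translation tables above are used instead, with the cmd-specific one selected by shell=True):

    shell_quote(['ffmpeg', '-i', 'my file.mp4'])
    # "ffmpeg -i 'my file.mp4'"      -- plain shlex.join on non-Windows platforms

    shell_quote('single-arg')
    # 'single-arg'                   -- single values are wrapped via variadic()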
@@ -1996,12 +2020,12 @@ def strftime_or_none(timestamp, date_format='%Y%m%d', default=None):
if isinstance(timestamp, (int, float)): # unix timestamp
# Using naive datetime here can break timestamp() in Windows
# Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414
- # Also, datetime.datetime.fromtimestamp breaks for negative timestamps
+ # Also, dt.datetime.fromtimestamp breaks for negative timestamps
# Ref: https://github.com/yt-dlp/yt-dlp/issues/6706#issuecomment-1496842642
- datetime_object = (datetime.datetime.fromtimestamp(0, datetime.timezone.utc)
- + datetime.timedelta(seconds=timestamp))
+ datetime_object = (dt.datetime.fromtimestamp(0, dt.timezone.utc)
+ + dt.timedelta(seconds=timestamp))
elif isinstance(timestamp, str): # assume YYYYMMDD
- datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
+ datetime_object = dt.datetime.strptime(timestamp, '%Y%m%d')
date_format = re.sub( # Support %s on windows
r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format)
return datetime_object.strftime(date_format)
@@ -2849,7 +2873,7 @@ def ytdl_is_updateable():
def args_to_str(args):
# Get a short string representation for a subprocess command
- return ' '.join(compat_shlex_quote(a) for a in args)
+ return shell_quote(args)
def error_to_str(err):
@@ -4490,10 +4514,10 @@ def write_xattr(path, key, value):
def random_birthday(year_field, month_field, day_field):
- start_date = datetime.date(1950, 1, 1)
- end_date = datetime.date(1995, 12, 31)
+ start_date = dt.date(1950, 1, 1)
+ end_date = dt.date(1995, 12, 31)
offset = random.randint(0, (end_date - start_date).days)
- random_date = start_date + datetime.timedelta(offset)
+ random_date = start_date + dt.timedelta(offset)
return {
year_field: str(random_date.year),
month_field: str(random_date.month),
@@ -4672,7 +4696,7 @@ def time_seconds(**kwargs):
"""
Returns TZ-aware time in seconds since the epoch (1970-01-01T00:00:00Z)
"""
- return time.time() + datetime.timedelta(**kwargs).total_seconds()
+ return time.time() + dt.timedelta(**kwargs).total_seconds()
# create a JSON Web Signature (jws) with HS256 algorithm
@@ -5415,6 +5439,17 @@ class FormatSorter:
return tuple(self._calculate_field_preference(format, field) for field in self._order)
+def filesize_from_tbr(tbr, duration):
+ """
+ @param tbr: Total bitrate in kbps (1000 bits/sec)
+ @param duration: Duration in seconds
+ @returns Filesize in bytes
+ """
+ if tbr is None or duration is None:
+ return None
+ return int(duration * tbr * (1000 / 8))
+
+
# XXX: Temporary
class _YDLLogger:
def __init__(self, ydl=None):
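The new filesize_from_tbr helper converts a total bitrate in kbps and a duration in seconds into bytes (1000 bits per kilobit, 8 bits per byte), for example:

    filesize_from_tbr(1000, 60)   # 1000 kbps over 60 s -> 7_500_000 bytes
    filesize_from_tbr(None, 60)   # missing bitrate -> None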
diff --git a/yt_dlp/utils/traversal.py b/yt_dlp/utils/traversal.py
index 8938f4c..96eb2ed 100644
--- a/yt_dlp/utils/traversal.py
+++ b/yt_dlp/utils/traversal.py
@@ -1,5 +1,6 @@
import collections.abc
import contextlib
+import http.cookies
import inspect
import itertools
import re
@@ -28,7 +29,8 @@ def traverse_obj(
Each of the provided `paths` is tested and the first producing a valid result will be returned.
The next path will also be tested if the path branched but no results could be found.
- Supported values for traversal are `Mapping`, `Iterable` and `re.Match`.
+ Supported values for traversal are `Mapping`, `Iterable`, `re.Match`,
+ `xml.etree.ElementTree` (xpath) and `http.cookies.Morsel`.
Unhelpful values (`{}`, `None`) are treated as the absence of a value and discarded.
The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`.
@@ -36,8 +38,8 @@ def traverse_obj(
The keys in the path can be one of:
- `None`: Return the current object.
- `set`: Requires the only item in the set to be a type or function,
- like `{type}`/`{func}`. If a `type`, returns only values
- of this type. If a function, returns `func(obj)`.
+ like `{type}`/`{type, type, ...}`/`{func}`. If a `type`, return only
+ values of this type. If a function, returns `func(obj)`.
- `str`/`int`: Return `obj[key]`. For `re.Match`, return `obj.group(key)`.
- `slice`: Branch out and return all values in `obj[key]`.
- `Ellipsis`: Branch out and return a list of all values.
@@ -48,8 +50,10 @@ def traverse_obj(
For `Iterable`s, `key` is the index of the value.
For `re.Match`es, `key` is the group number (0 = full match)
as well as additionally any group names, if given.
- - `dict` Transform the current object and return a matching dict.
+ - `dict`: Transform the current object and return a matching dict.
Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`.
+ - `any`-builtin: Take the first matching object and return it, resetting branching.
+ - `all`-builtin: Take all matching objects and return them as a list, resetting branching.
`tuple`, `list`, and `dict` all support nested paths and branches.
@@ -102,10 +106,10 @@ def traverse_obj(
result = obj
elif isinstance(key, set):
- assert len(key) == 1, 'Set should only be used to wrap a single item'
item = next(iter(key))
- if isinstance(item, type):
- if isinstance(obj, item):
+ if len(key) > 1 or isinstance(item, type):
+ assert all(isinstance(item, type) for item in key)
+ if isinstance(obj, tuple(key)):
result = obj
else:
result = try_call(item, args=(obj,))
@@ -117,6 +121,8 @@ def traverse_obj(
elif key is ...:
branching = True
+ if isinstance(obj, http.cookies.Morsel):
+ obj = dict(obj, key=obj.key, value=obj.value)
if isinstance(obj, collections.abc.Mapping):
result = obj.values()
elif is_iterable_like(obj) or isinstance(obj, xml.etree.ElementTree.Element):
@@ -131,6 +137,8 @@ def traverse_obj(
elif callable(key):
branching = True
+ if isinstance(obj, http.cookies.Morsel):
+ obj = dict(obj, key=obj.key, value=obj.value)
if isinstance(obj, collections.abc.Mapping):
iter_obj = obj.items()
elif is_iterable_like(obj) or isinstance(obj, xml.etree.ElementTree.Element):
@@ -157,6 +165,8 @@ def traverse_obj(
} or None
elif isinstance(obj, collections.abc.Mapping):
+ if isinstance(obj, http.cookies.Morsel):
+ obj = dict(obj, key=obj.key, value=obj.value)
result = (try_call(obj.get, args=(key,)) if casesense or try_call(obj.__contains__, args=(key,)) else
next((v for k, v in obj.items() if casefold(k) == key), None))
@@ -179,7 +189,7 @@ def traverse_obj(
elif isinstance(obj, xml.etree.ElementTree.Element) and isinstance(key, str):
xpath, _, special = key.rpartition('/')
- if not special.startswith('@') and special != 'text()':
+ if not special.startswith('@') and not special.endswith('()'):
xpath = key
special = None
@@ -198,7 +208,7 @@ def traverse_obj(
return try_call(element.attrib.get, args=(special[1:],))
if special == 'text()':
return element.text
- assert False, f'apply_specials is missing case for {special!r}'
+ raise SyntaxError(f'apply_specials is missing case for {special!r}')
if xpath:
result = list(map(apply_specials, obj.iterfind(xpath)))
@@ -228,6 +238,15 @@ def traverse_obj(
if not casesense and isinstance(key, str):
key = key.casefold()
+ if key in (any, all):
+ has_branched = False
+ filtered_objs = (obj for obj in objs if obj not in (None, {}))
+ if key is any:
+ objs = (next(filtered_objs, None),)
+ else:
+ objs = (list(filtered_objs),)
+ continue
+
if __debug__ and callable(key):
# Verify function signature
inspect.signature(key).bind(None, None)
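A sketch of the traversal features documented above (the sample data is illustrative):

    import http.cookies
    from yt_dlp.utils.traversal import traverse_obj

    data = {'items': [{'id': None}, {'id': 42}, {'id': 'abc'}]}

    # a multi-type set keeps only values matching one of the given types
    traverse_obj(data, ('items', ..., 'id', {int, str}))   # [42, 'abc']

    # `any` collapses the branch to the first non-empty match
    traverse_obj(data, ('items', ..., 'id', {str}, any))   # 'abc'

    # Morsel objects now traverse like mappings, plus synthetic 'key'/'value' entries
    morsel = http.cookies.SimpleCookie('sid=abc; Path=/')['sid']
    traverse_obj(morsel, 'value')                           # 'abc'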
diff --git a/yt_dlp/version.py b/yt_dlp/version.py
index 68c3f00..22c2c04 100644
--- a/yt_dlp/version.py
+++ b/yt_dlp/version.py
@@ -1,8 +1,8 @@
# Autogenerated by devscripts/update-version.py
-__version__ = '2024.03.10'
+__version__ = '2024.04.09'
-RELEASE_GIT_HEAD = '615a84447e8322720be77a0e64298d7f42848693'
+RELEASE_GIT_HEAD = 'ff07792676f404ffff6ee61b5638c9dc1a33a37a'
VARIANT = None
@@ -12,4 +12,4 @@ CHANNEL = 'stable'
ORIGIN = 'yt-dlp/yt-dlp'
-_pkg_version = '2024.03.10'
+_pkg_version = '2024.04.09'