author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 17:47:36 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 17:47:36 +0000
commit     3c7813683b1845959aca706eaa23f062a006356b
tree       ecba42f14f0c919d94332e2633d9b0e6834c9cec
parent     Initial commit.
Adding upstream version 3.4.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--  .bzrignore | 7
-rw-r--r--  .circleci/config.yml | 107
-rw-r--r--  .codecov.yml | 3
-rw-r--r--  .codespellrc | 7
-rw-r--r--  .coveragerc | 6
-rw-r--r--  .flake8 | 9
-rw-r--r--  .git-blame-ignore-revs | 2
-rw-r--r--  .github/FUNDING.yml | 3
-rw-r--r--  .github/ISSUE_TEMPLATE/10_support_request.yml | 113
-rw-r--r--  .github/ISSUE_TEMPLATE/20_bug_report.yml | 120
-rw-r--r--  .github/ISSUE_TEMPLATE/30_feature_request.yml | 73
-rw-r--r--  .github/ISSUE_TEMPLATE/config.yml | 5
-rw-r--r--  .gitignore | 14
-rw-r--r--  Dockerfile.i386 | 10
-rw-r--r--  LICENSE | 504
-rw-r--r--  MANIFEST.in | 5
-rw-r--r--  README.rst | 51
-rw-r--r--  SECURITY.md | 7
-rw-r--r--  TODO | 3
-rw-r--r--  codecov.yml | 1
-rwxr-xr-x  demos/demo.py | 187
-rwxr-xr-x  demos/demo_keygen.py | 174
-rw-r--r--  demos/demo_server.py | 185
-rw-r--r--  demos/demo_sftp.py | 143
-rw-r--r--  demos/demo_simple.py | 116
-rw-r--r--  demos/forward.py | 257
-rw-r--r--  demos/interactive.py | 101
-rwxr-xr-x  demos/rforward.py | 230
-rw-r--r--  demos/test_rsa.key | 15
-rw-r--r--  demos/user_rsa_key | 15
-rw-r--r--  demos/user_rsa_key.pub | 1
-rw-r--r--  dev-requirements.txt | 24
-rw-r--r--  images/paramiko-banner.png | bin 0 -> 2885 bytes
-rw-r--r--  images/paramiko-banner.psd | bin 0 -> 52177 bytes
-rw-r--r--  images/paramiko.png | bin 0 -> 2016 bytes
-rw-r--r--  paramiko/__init__.py | 165
-rw-r--r--  paramiko/_version.py | 2
-rw-r--r--  paramiko/_winapi.py | 413
-rw-r--r--  paramiko/agent.py | 497
-rw-r--r--  paramiko/auth_handler.py | 1092
-rw-r--r--  paramiko/auth_strategy.py | 306
-rw-r--r--  paramiko/ber.py | 139
-rw-r--r--  paramiko/buffered_pipe.py | 212
-rw-r--r--  paramiko/channel.py | 1390
-rw-r--r--  paramiko/client.py | 893
-rw-r--r--  paramiko/common.py | 245
-rw-r--r--  paramiko/compress.py | 40
-rw-r--r--  paramiko/config.py | 696
-rw-r--r--  paramiko/dsskey.py | 258
-rw-r--r--  paramiko/ecdsakey.py | 339
-rw-r--r--  paramiko/ed25519key.py | 212
-rw-r--r--  paramiko/file.py | 528
-rw-r--r--  paramiko/hostkeys.py | 384
-rw-r--r--  paramiko/kex_curve25519.py | 131
-rw-r--r--  paramiko/kex_ecdh_nist.py | 151
-rw-r--r--  paramiko/kex_gex.py | 288
-rw-r--r--  paramiko/kex_group1.py | 155
-rw-r--r--  paramiko/kex_group14.py | 40
-rw-r--r--  paramiko/kex_group16.py | 35
-rw-r--r--  paramiko/kex_gss.py | 686
-rw-r--r--  paramiko/message.py | 318
-rw-r--r--  paramiko/packet.py | 649
-rw-r--r--  paramiko/pipe.py | 148
-rw-r--r--  paramiko/pkey.py | 938
-rw-r--r--  paramiko/primes.py | 148
-rw-r--r--  paramiko/proxy.py | 134
-rw-r--r--  paramiko/rsakey.py | 227
-rw-r--r--  paramiko/server.py | 732
-rw-r--r--  paramiko/sftp.py | 224
-rw-r--r--  paramiko/sftp_attr.py | 239
-rw-r--r--  paramiko/sftp_client.py | 965
-rw-r--r--  paramiko/sftp_file.py | 594
-rw-r--r--  paramiko/sftp_handle.py | 196
-rw-r--r--  paramiko/sftp_server.py | 537
-rw-r--r--  paramiko/sftp_si.py | 316
-rw-r--r--  paramiko/ssh_exception.py | 250
-rw-r--r--  paramiko/ssh_gss.py | 778
-rw-r--r--  paramiko/transport.py | 3389
-rw-r--r--  paramiko/util.py | 337
-rw-r--r--  paramiko/win_openssh.py | 56
-rw-r--r--  paramiko/win_pageant.py | 138
-rw-r--r--  pytest.ini | 3
-rw-r--r--  setup.py | 95
-rw-r--r--  setup_helper.py | 160
-rw-r--r--  sites/docs/.readthedocs.yaml | 13
-rw-r--r--  sites/docs/api/agent.rst | 6
-rw-r--r--  sites/docs/api/auth.rst | 8
-rw-r--r--  sites/docs/api/buffered_pipe.rst | 4
-rw-r--r--  sites/docs/api/channel.rst | 4
-rw-r--r--  sites/docs/api/client.rst | 5
-rw-r--r--  sites/docs/api/config.rst | 135
-rw-r--r--  sites/docs/api/file.rst | 4
-rw-r--r--  sites/docs/api/hostkeys.rst | 5
-rw-r--r--  sites/docs/api/kex_gss.rst | 5
-rw-r--r--  sites/docs/api/keys.rst | 28
-rw-r--r--  sites/docs/api/message.rst | 4
-rw-r--r--  sites/docs/api/packet.rst | 4
-rw-r--r--  sites/docs/api/pipe.rst | 4
-rw-r--r--  sites/docs/api/proxy.rst | 4
-rw-r--r--  sites/docs/api/server.rst | 5
-rw-r--r--  sites/docs/api/sftp.rst | 13
-rw-r--r--  sites/docs/api/ssh_exception.rst | 4
-rw-r--r--  sites/docs/api/ssh_gss.rst | 17
-rw-r--r--  sites/docs/api/transport.rst | 5
-rw-r--r--  sites/docs/conf.py | 28
-rw-r--r--  sites/docs/index.rst | 75
-rw-r--r--  sites/shared_conf.py | 33
-rw-r--r--  sites/www/.readthedocs.yaml | 13
-rw-r--r--  sites/www/_templates/rss.xml | 19
-rw-r--r--  sites/www/changelog.rst | 1668
-rw-r--r--  sites/www/conf.py | 28
-rw-r--r--  sites/www/contact.rst | 12
-rw-r--r--  sites/www/contributing.rst | 25
-rw-r--r--  sites/www/faq.rst | 36
-rw-r--r--  sites/www/index.rst | 13
-rw-r--r--  sites/www/installing-1.x.rst | 121
-rw-r--r--  sites/www/installing.rst | 152
-rw-r--r--  tasks.py | 163
-rw-r--r--  tests/__init__.py | 56
-rw-r--r--  tests/_loop.py | 98
-rw-r--r--  tests/_stub_sftp.py | 232
-rw-r--r--  tests/_support/dss.key | 12
-rw-r--r--  tests/_support/dss.key-cert.pub | 1
-rw-r--r--  tests/_support/ecdsa-256.key | 5
-rw-r--r--  tests/_support/ecdsa-256.key-cert.pub | 1
-rw-r--r--  tests/_support/ed25519.key | 8
-rw-r--r--  tests/_support/ed25519.key-cert.pub | 1
-rw-r--r--  tests/_support/ed448.key | 4
-rw-r--r--  tests/_support/rsa-lonely.key | 15
-rw-r--r--  tests/_support/rsa-missing.key-cert.pub | 1
-rw-r--r--  tests/_support/rsa.key | 15
-rw-r--r--  tests/_support/rsa.key-cert.pub | 1
-rw-r--r--  tests/_util.py | 468
-rw-r--r--  tests/agent.py | 151
-rw-r--r--  tests/auth.py | 580
-rw-r--r--  tests/badhash_key1.ed25519.key | 7
-rw-r--r--  tests/badhash_key2.ed25519.key | 7
-rw-r--r--  tests/blank_rsa.key | 0
-rw-r--r--  tests/configs/basic | 4
-rw-r--r--  tests/configs/canon | 8
-rw-r--r--  tests/configs/canon-always | 5
-rw-r--r--  tests/configs/canon-ipv4 | 6
-rw-r--r--  tests/configs/canon-local | 6
-rw-r--r--  tests/configs/canon-local-always | 6
-rw-r--r--  tests/configs/deep-canon | 11
-rw-r--r--  tests/configs/deep-canon-maxdots | 12
-rw-r--r--  tests/configs/empty-canon | 6
-rw-r--r--  tests/configs/fallback-no | 6
-rw-r--r--  tests/configs/fallback-yes | 6
-rw-r--r--  tests/configs/hostname-exec-tokenized | 2
-rw-r--r--  tests/configs/hostname-tokenized | 1
-rw-r--r--  tests/configs/invalid | 1
-rw-r--r--  tests/configs/match-all | 2
-rw-r--r--  tests/configs/match-all-after-canonical | 5
-rw-r--r--  tests/configs/match-all-and-more | 2
-rw-r--r--  tests/configs/match-all-and-more-before | 2
-rw-r--r--  tests/configs/match-all-before-canonical | 5
-rw-r--r--  tests/configs/match-canonical-no | 7
-rw-r--r--  tests/configs/match-canonical-yes | 5
-rw-r--r--  tests/configs/match-complex | 17
-rw-r--r--  tests/configs/match-exec | 16
-rw-r--r--  tests/configs/match-exec-canonical | 10
-rw-r--r--  tests/configs/match-exec-negation | 5
-rw-r--r--  tests/configs/match-exec-no-arg | 2
-rw-r--r--  tests/configs/match-final | 14
-rw-r--r--  tests/configs/match-host | 2
-rw-r--r--  tests/configs/match-host-canonicalized | 8
-rw-r--r--  tests/configs/match-host-from-match | 5
-rw-r--r--  tests/configs/match-host-glob | 2
-rw-r--r--  tests/configs/match-host-glob-list | 8
-rw-r--r--  tests/configs/match-host-name | 4
-rw-r--r--  tests/configs/match-host-negated | 2
-rw-r--r--  tests/configs/match-host-no-arg | 2
-rw-r--r--  tests/configs/match-localuser | 14
-rw-r--r--  tests/configs/match-localuser-no-arg | 2
-rw-r--r--  tests/configs/match-orighost | 16
-rw-r--r--  tests/configs/match-orighost-canonical | 5
-rw-r--r--  tests/configs/match-orighost-no-arg | 2
-rw-r--r--  tests/configs/match-user | 14
-rw-r--r--  tests/configs/match-user-explicit | 4
-rw-r--r--  tests/configs/match-user-no-arg | 2
-rw-r--r--  tests/configs/multi-canon-domains | 5
-rw-r--r--  tests/configs/no-canon | 5
-rw-r--r--  tests/configs/robey | 17
-rw-r--r--  tests/configs/zero-maxdots | 9
-rw-r--r--  tests/conftest.py | 170
-rw-r--r--  tests/pkey.py | 229
-rw-r--r--  tests/test_buffered_pipe.py | 91
-rw-r--r--  tests/test_channelfile.py | 60
-rw-r--r--  tests/test_client.py | 837
-rw-r--r--  tests/test_config.py | 1048
-rw-r--r--  tests/test_dss_openssh.key | 22
-rw-r--r--  tests/test_dss_password.key | 15
-rw-r--r--  tests/test_ecdsa_384.key | 6
-rw-r--r--  tests/test_ecdsa_384_openssh.key | 11
-rw-r--r--  tests/test_ecdsa_521.key | 7
-rw-r--r--  tests/test_ecdsa_password_256.key | 8
-rw-r--r--  tests/test_ecdsa_password_384.key | 9
-rw-r--r--  tests/test_ecdsa_password_521.key | 10
-rw-r--r--  tests/test_ed25519-funky-padding.key | 7
-rw-r--r--  tests/test_ed25519-funky-padding_password.key | 8
-rw-r--r--  tests/test_ed25519_password.key | 8
-rw-r--r--  tests/test_file.py | 226
-rw-r--r--  tests/test_gssapi.py | 225
-rw-r--r--  tests/test_hostkeys.py | 172
-rw-r--r--  tests/test_kex.py | 668
-rw-r--r--  tests/test_kex_gss.py | 154
-rw-r--r--  tests/test_message.py | 113
-rw-r--r--  tests/test_packetizer.py | 148
-rw-r--r--  tests/test_pkey.py | 696
-rw-r--r--  tests/test_proxy.py | 150
-rw-r--r--  tests/test_rsa.key.pub | 1
-rw-r--r--  tests/test_rsa_openssh.key | 28
-rw-r--r--  tests/test_rsa_openssh_nopad.key | 27
-rw-r--r--  tests/test_rsa_password.key | 18
-rw-r--r--  tests/test_sftp.py | 832
-rw-r--r--  tests/test_sftp_big.py | 416
-rw-r--r--  tests/test_ssh_exception.py | 75
-rw-r--r--  tests/test_ssh_gss.py | 160
-rw-r--r--  tests/test_transport.py | 1446
-rw-r--r--  tests/test_util.py | 136
221 files changed, 36229 insertions, 0 deletions
diff --git a/.bzrignore b/.bzrignore
new file mode 100644
index 0000000..6ce9d80
--- /dev/null
+++ b/.bzrignore
@@ -0,0 +1,7 @@
+*.pyc
+./build
+./paramiko.egg-info
+./dist
+./.project
+./paramiko.tmproj
+./test.log
diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 0000000..7214201
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,107 @@
+version: 2.1
+
+
+orbs:
+ orb: invocations/orb@1.3.1
+
+
+jobs:
+ sdist-test-suite:
+ executor:
+ name: orb/default
+ version: "3.6"
+ steps:
+ - orb/setup
+ - run: inv release.build --no-wheel --directory .
+ - run: |
+ cd dist
+ tar xzvf *.tar.gz
+ rm -v *.tar.gz
+ cd paramiko-*
+ pip install -e .
+ inv -e test
+ - orb/debug
+
+ kerberos:
+ executor:
+ name: orb/default
+ version: "3.6"
+ steps:
+ - orb/setup
+ # Required to actually see all of universe/multiverse :(
+ - run: sudo apt update
+ # System reqs to install/build gssapi c-ext & friends (who only
+ # appear to offer wheels for Windows)
+ - run: sudo apt install -y libkrb5-dev krb5-admin-server krb5-kdc
+ # Our gssapi-supporting flavor, eg gssapi, pyasn1 etc
+ - run: pip install -e '.[gssapi]'
+ # Test-only deps for Kerberos (if they are importable it triggers
+ # running the kerberos tests instead of skipping them)
+ - run: pip install k5test
+ # Do the thing, win the points!
+ - run: inv test
+ - orb/debug
+
+ # TODO: move to orb, rub on other projects too
+ spellcheck:
+ executor:
+ name: orb/default
+ version: "3.6"
+ steps:
+ - orb/setup
+ - run: codespell
+ - orb/debug
+
+
+workflows:
+ main:
+ jobs:
+ # The basics
+ - orb/lint:
+ name: Lint
+ - orb/format:
+ name: Style check
+ - spellcheck:
+ name: Spellcheck
+ # Main test run, w/ coverage, and latest-supported cryptography
+ - orb/coverage:
+ name: Test
+ # Non-coverage runs w/ other crypto versions.
+ # (Phrased as 2-dimensional matrix but 3.6 only for now to save credits)
+ - orb/test:
+ name: Test << matrix.version >> w/ << matrix.pip-overrides >>
+ matrix:
+ parameters:
+ version: ["3.6"]
+ # TODO: I don't see a nicer way to do this that doesn't require
+ # making the orb know too much about its client code...
+ # TODO: the upper end of this needs to change and/or grow more
+ # cells, periodically
+ # TODO: see if there's a non shite way to use bucketing here,
+ # somewhere between yaml, pip and bash all escapes get fucky
+ pip-overrides: ["cryptography==3.3.2", "cryptography==39.0.0"]
+ # Kerberos tests. Currently broken :(
+ #- kerberos:
+ # name: Test 3.6 w/ Kerberos support
+ # # No point testing k5 if base tests already fail
+ # requires: ["Test 3.6 (w/ coverage, latest crypto)"]
+ - orb/test-release:
+ name: Release test
+ # Ensure test suite is included in sdist & functions appropriately
+ - sdist-test-suite:
+ name: Test within sdist
+ requires:
+ - "Test"
+ - "Release test"
+ # Test other interpreters if main passed
+ - orb/test:
+ name: Test << matrix.version >>
+ requires: ["Test"]
+ matrix:
+ parameters:
+ version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ # Test doc building if main test suite passed (no real reason to spend
+ # all those credits if the main tests would also fail...)
+ - orb/docs:
+ name: "Docs"
+ requires: ["Test"]
diff --git a/.codecov.yml b/.codecov.yml
new file mode 100644
index 0000000..41ab9cc
--- /dev/null
+++ b/.codecov.yml
@@ -0,0 +1,3 @@
+comment: false
+coverage:
+ precision: 0
diff --git a/.codespellrc b/.codespellrc
new file mode 100644
index 0000000..a8d619e
--- /dev/null
+++ b/.codespellrc
@@ -0,0 +1,7 @@
+[codespell]
+# Default ignores, plus built docs and static doc sources
+skip = venvs,.venv,.git,build,*.egg-info,*.lock,*.js,*.css,docs
+# Certain words AUTHOR feels strongly about, plus various proper names that are
+# close enough to real words that they anger codespell. (NOTE: for some reason
+# codespell wants the latter listed in all-lowercase...!)
+ignore-words-list = keypair,flage,lew,welp,strat
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..90c7ab0
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,6 @@
+[run]
+branch = True
+include =
+ paramiko/*
+ tests/*
+omit = paramiko/_winapi.py
diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000..1317fd5
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,9 @@
+[flake8]
+exclude = sites,.git,build,dist,demos
+# NOTE: W503, E203 are concessions to black 18.0b5 and could be reinstated
+# later if fixed on that end.
+# NOTE: E722 seems to only have started popping up on move to flake8 3.6.0 from
+# 2.4.0. Not sure why, bare excepts have been a (regrettable) thing forever...
+ignore = E124,E125,E128,E261,E301,E302,E303,E402,E721,W503,E203,E722
+max-line-length = 79
+
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000..f8f0117
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,2 @@
+# blackening
+7f2c35052183b400827d9949a68b41c90f90a32d
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000..30ce38c
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,3 @@
+# These are supported funding model platforms
+
+tidelift: "pypi/paramiko"
diff --git a/.github/ISSUE_TEMPLATE/10_support_request.yml b/.github/ISSUE_TEMPLATE/10_support_request.yml
new file mode 100644
index 0000000..1da1016
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/10_support_request.yml
@@ -0,0 +1,113 @@
+name: Support Request
+description: |
+ Use this template when you're having trouble using paramiko.
+title: "[SUPPORT] - <title>"
+labels: ["Support"]
+
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thanks for using paramiko! We're sorry you're having trouble making it work the way you want. Please provide the information below and describe the problem you're having and we'll do our best to help.
+
+ - type: dropdown
+ id: usage_posture
+ attributes:
+ label: Are you using paramiko as a client or server?
+ multiple: false
+ options:
+ - Client
+ - Server
+ - Both
+ - Not sure
+ validations:
+ required: true
+
+ - type: dropdown
+ id: features
+ attributes:
+ label: What feature(s) aren't working right?
+ description: Select as many as are relevant
+ multiple: true
+ options:
+ - SSH
+ - SFTP
+ - Keys/auth
+ - known_hosts
+ - sshconfig
+ - Exception handling
+ - Something else
+ validations:
+ required: true
+
+ - type: input
+ id: paramiko_version
+ attributes:
+ label: What version(s) of paramiko are you using?
+ description: |
+ Find out with `$ python -c "import paramiko; print(paramiko.__version__)"`
+ placeholder: |
+ Example: 3.1.0
+ validations:
+ required: true
+
+ - type: input
+ id: python_version
+ attributes:
+ label: What version(s) of Python are you using?
+ description: |
+ Find out with `$ python -V`
+ placeholder: |
+ Example: 3.11.3
+ validations:
+ required: true
+
+ - type: input
+ id: os_info
+ attributes:
+ label: What operating system and version are you using?
+ placeholder: |
+ Example: WSL on Windows 11; or MacOS Mojave; or Ubuntu 22.10
+ validations:
+ required: true
+
+ - type: input
+ id: server_info
+ attributes:
+ label: If you're connecting as a client, which SSH server are you connecting to?
+ description: |
+ Leave this blank if you're not sure.
+ placeholder: |
+ Example: OpenSSH x.y; or Teleport vNN
+
+ - type: input
+ id: integrated_tool
+ attributes:
+ label: If you're using paramiko as part of another tool, which tool/version?
+ placeholder: |
+ Examples: Fabric, Ansible, sftputil
+
+ - type: textarea
+ id: intended_use
+ attributes:
+ label: What are you trying to do with paramiko?
+ description: |
+ Please describe in words what you are trying to do.
+ validations:
+ required: true
+
+ - type: textarea
+ id: problem_details
+ attributes:
+ label: How are you trying to do it, and what's happening instead?
+ description: |
+ Include code snippets and a description of the expected output, and be as detailed as possible. If possible, try to reduce your code examples to a minimal example that reproduces the problem/behavior.
+ validations:
+ required: true
+
+ - type: textarea
+ id: more_info
+ attributes:
+ label: Anything else?
+ description: |
+ Please provide any additional information that might help us find a solution for you.
diff --git a/.github/ISSUE_TEMPLATE/20_bug_report.yml b/.github/ISSUE_TEMPLATE/20_bug_report.yml
new file mode 100644
index 0000000..82f9066
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/20_bug_report.yml
@@ -0,0 +1,120 @@
+name: Bug Report
+description: |
+ Use this template when paramiko appears to be doing something wrong.
+title: "[BUG] - <title>"
+labels: ["Bug"]
+
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thanks for taking the time to file a bug report!
+
+ - type: dropdown
+ id: usage_posture
+ attributes:
+ label: Are you using paramiko as a client or server?
+ multiple: false
+ options:
+ - Client
+ - Server
+ - Both
+ - Exception handling
+ - Not sure
+ validations:
+ required: true
+
+ - type: dropdown
+ id: features
+ attributes:
+ label: What feature(s) aren't working right?
+ description: Select as many as are relevant
+ multiple: true
+ options:
+ - SSH
+ - SFTP
+ - Keys/auth
+ - known_hosts
+ - sshconfig
+ - Something else
+ validations:
+ required: true
+
+ - type: input
+ id: paramiko_version
+ attributes:
+ label: What version(s) of paramiko are you using?
+ description: |
+ Find out with `$ python -c "import paramiko; print(paramiko.__version__)"`
+ placeholder: |
+ Example: 3.1.0
+ validations:
+ required: true
+
+ - type: input
+ id: python_version
+ attributes:
+ label: What version(s) of Python are you using?
+ description: |
+ Find out with `$ python -V`
+ placeholder: |
+ Example: 3.11.3
+ validations:
+ required: true
+
+ - type: input
+ id: os_info
+ attributes:
+ label: What operating system and version are you using?
+ placeholder: |
+ Example: WSL on Windows 11; or MacOS Mojave; or Ubuntu 22.10
+ validations:
+ required: true
+
+ - type: input
+ id: server_info
+ attributes:
+ label: If you're connecting as a client, which SSH server are you connecting to?
+ description: |
+ Leave this blank if you're not sure.
+ placeholder: |
+ Example: OpenSSH x.y; or Teleport vNN
+
+ - type: input
+ id: integrated_tool
+ attributes:
+ label: If you're using paramiko as part of another tool, which tool/version?
+ placeholder: |
+ Examples: Fabric, Ansible, sftputil
+
+ - type: textarea
+ id: desired_behavior
+ attributes:
+ label: Expected/desired behavior
+ description: |
+ Please describe what you are trying to do with paramiko. Include code snippets and be as detailed as possible.
+ validations:
+ required: true
+
+ - type: textarea
+ id: actual_behavior
+ attributes:
+ label: Actual behavior
+ description: |
+ What is paramiko doing instead?
+ validations:
+ required: true
+
+ - type: textarea
+ id: repro
+ attributes:
+ label: How to reproduce
+ description: |
+ If possible, please provide a minimal code example that reproduces the bug.
+
+ - type: textarea
+ id: more_info
+ attributes:
+ label: Anything else?
+ description: |
+ Please provide any additional information that might help us find and fix the bug.
diff --git a/.github/ISSUE_TEMPLATE/30_feature_request.yml b/.github/ISSUE_TEMPLATE/30_feature_request.yml
new file mode 100644
index 0000000..ac3bef2
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/30_feature_request.yml
@@ -0,0 +1,73 @@
+name: Feature Request
+description: |
+ Use this template to request addition of a new paramiko feature.
+title: "[FEAT] - <title>"
+labels: ["Feature"]
+
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thanks for taking the time to let us know what you'd like added to paramiko!
+
+ - type: dropdown
+ id: usage_posture
+ attributes:
+ label: Is this feature for paramiko acting as a client or a server?
+ multiple: false
+ options:
+ - Client
+ - Server
+ - Both
+ - Not sure
+ validations:
+ required: true
+
+ - type: dropdown
+ id: features
+ attributes:
+ label: What functionality does this feature request relate to?
+ description: Select as many as are relevant
+ multiple: true
+ options:
+ - SSH
+ - SFTP
+ - Keys/auth
+ - known_hosts
+ - sshconfig
+ - Exception handling
+ - Something else
+ validations:
+ required: true
+
+ - type: input
+ id: server_info
+ attributes:
+ label: For client-side features, does this relate to a specific type of SSH server?
+ description: |
+ Leave this blank if you're not sure, or if you're requesting a server-side feature.
+ placeholder: |
+ Example: OpenSSH x.y; or Teleport vNN
+
+ - type: input
+ id: integrated_tool
+ attributes:
+ label: If you're using paramiko as part of another tool, which tool/version?
+ placeholder: |
+ Examples: Fabric, Ansible, sftputil
+
+ - type: textarea
+ id: desired_behavior
+ attributes:
+ label: Desired behavior
+ description: |
+ Please describe what you would like paramiko to be able to do. If possible, include pseudocode or mock code snippets to illustrate the desired behavior, and be as detailed as possible.
+ validations:
+ required: true
+
+ - type: textarea
+ id: more_info
+ attributes:
+ label: Anything else?
+ description: |
+ Please provide any additional information that would be helpful to provide context for your requested feature.
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..717686a
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: true
+contact_links:
+ - name: "Blank Issue Template"
+ url: https://github.com/paramiko/paramiko/issues/new
+ about: "Use this as a last resort, if none of the other issue types fit." \ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..c6a6c49
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,14 @@
+*.pyc
+build/
+dist/
+.tox/
+paramiko.egg-info/
+docs/
+demos/*.log
+!sites/docs
+_build
+.coverage
+.cache
+.idea
+coverage.xml
+htmlcov
diff --git a/Dockerfile.i386 b/Dockerfile.i386
new file mode 100644
index 0000000..76418ce
--- /dev/null
+++ b/Dockerfile.i386
@@ -0,0 +1,10 @@
+# Convenience tool for testing a 32-bit related security flaw. May be
+# generalized in future to be more useful; until then, it is NOT
+# officially supported but purely a maintainer-facing artifact.
+
+FROM --platform=i386 i386/alpine:3.15
+
+RUN apk add openssl-dev python3-dev libffi-dev make cargo
+
+RUN python3 -m venv env
+RUN env/bin/pip install -U pip
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..d12bef0
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..8c6fc5d
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,5 @@
+include LICENSE setup_helper.py pytest.ini
+recursive-include docs *
+recursive-include tests *.py *.key *.pub
+recursive-include tests/configs *
+recursive-include demos *.py *.key user_rsa_key user_rsa_key.pub
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..ef7b8ec
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,51 @@
+|version| |python| |license| |ci| |coverage|
+
+.. |version| image:: https://img.shields.io/pypi/v/paramiko
+ :target: https://pypi.org/project/paramiko/
+ :alt: PyPI - Package Version
+.. |python| image:: https://img.shields.io/pypi/pyversions/paramiko
+ :target: https://pypi.org/project/paramiko/
+ :alt: PyPI - Python Version
+.. |license| image:: https://img.shields.io/pypi/l/paramiko
+ :target: https://github.com/paramiko/paramiko/blob/main/LICENSE
+ :alt: PyPI - License
+.. |ci| image:: https://img.shields.io/circleci/build/github/paramiko/paramiko/main
+ :target: https://app.circleci.com/pipelines/github/paramiko/paramiko
+ :alt: CircleCI
+.. |coverage| image:: https://img.shields.io/codecov/c/gh/paramiko/paramiko
+ :target: https://app.codecov.io/gh/paramiko/paramiko
+ :alt: Codecov
+
+Welcome to Paramiko!
+====================
+
+Paramiko is a pure-Python [#]_ (3.6+) implementation of the SSHv2 protocol
+[#]_, providing both client and server functionality. It provides the
+foundation for the high-level SSH library `Fabric <https://fabfile.org>`_,
+which is what we recommend you use for common client use-cases such as running
+remote shell commands or transferring files.
+
+Direct use of Paramiko itself is only intended for users who need
+advanced/low-level primitives or want to run an in-Python sshd.
+
+For installation information, changelogs, FAQs and similar, please visit `our
+main project website <https://paramiko.org>`_; for API details, see `the
+versioned docs <https://docs.paramiko.org>`_. Additionally, the project
+maintainer keeps a `roadmap <http://bitprophet.org/projects#roadmap>`_ on his
+personal site.
+
+.. [#]
+ Paramiko relies on `cryptography <https://cryptography.io>`_ for crypto
+ functionality, which makes use of C and Rust extensions but has many
+ precompiled options available. See `our installation page
+ <https://www.paramiko.org/installing.html>`_ for details.
+
+.. [#]
+ OpenSSH's RFC specification page is a fantastic resource and collection of
+ links that we won't bother replicating here:
+ https://www.openssh.com/specs.html
+
+ OpenSSH itself also happens to be our primary reference implementation:
+ when in doubt, we consult how they do things, unless there are good reasons
+ not to. There are always some gaps, but we do our best to reconcile them
+ when possible.
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..f9dc270
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,7 @@
+# Security Policy
+
+## Security contact information
+
+To report a security vulnerability, please use the
+[Tidelift security contact](https://tidelift.com/security).
+Tidelift will coordinate the fix and disclosure.
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..4bda14a
--- /dev/null
+++ b/TODO
@@ -0,0 +1,3 @@
+* Change license to BSD for v1.8 (obtain permission from Robey)
+* Pending that, remove preamble from all files, ensure LICENSE is still correct
+* Update version stuff: use an execfile'd paramiko/_version.py
diff --git a/codecov.yml b/codecov.yml
new file mode 100644
index 0000000..69cb760
--- /dev/null
+++ b/codecov.yml
@@ -0,0 +1 @@
+comment: false
diff --git a/demos/demo.py b/demos/demo.py
new file mode 100755
index 0000000..5252db7
--- /dev/null
+++ b/demos/demo.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import base64
+from binascii import hexlify
+import getpass
+import os
+import select
+import socket
+import sys
+import time
+import traceback
+from paramiko.py3compat import input
+
+import paramiko
+
+try:
+ import interactive
+except ImportError:
+ from . import interactive
+
+
+def agent_auth(transport, username):
+ """
+ Attempt to authenticate to the given transport using any of the private
+ keys available from an SSH agent.
+ """
+
+ agent = paramiko.Agent()
+ agent_keys = agent.get_keys()
+ if len(agent_keys) == 0:
+ return
+
+ for key in agent_keys:
+ print("Trying ssh-agent key %s" % hexlify(key.get_fingerprint()))
+ try:
+ transport.auth_publickey(username, key)
+ print("... success!")
+ return
+ except paramiko.SSHException:
+ print("... nope.")
+
+
+def manual_auth(username, hostname):
+ default_auth = "p"
+ auth = input(
+ "Auth by (p)assword, (r)sa key, or (d)ss key? [%s] " % default_auth
+ )
+ if len(auth) == 0:
+ auth = default_auth
+
+ if auth == "r":
+ default_path = os.path.join(os.environ["HOME"], ".ssh", "id_rsa")
+ path = input("RSA key [%s]: " % default_path)
+ if len(path) == 0:
+ path = default_path
+ try:
+ key = paramiko.RSAKey.from_private_key_file(path)
+ except paramiko.PasswordRequiredException:
+ password = getpass.getpass("RSA key password: ")
+ key = paramiko.RSAKey.from_private_key_file(path, password)
+ t.auth_publickey(username, key)
+ elif auth == "d":
+ default_path = os.path.join(os.environ["HOME"], ".ssh", "id_dsa")
+ path = input("DSS key [%s]: " % default_path)
+ if len(path) == 0:
+ path = default_path
+ try:
+ key = paramiko.DSSKey.from_private_key_file(path)
+ except paramiko.PasswordRequiredException:
+ password = getpass.getpass("DSS key password: ")
+ key = paramiko.DSSKey.from_private_key_file(path, password)
+ t.auth_publickey(username, key)
+ else:
+ pw = getpass.getpass("Password for %s@%s: " % (username, hostname))
+ t.auth_password(username, pw)
+
+
+# setup logging
+paramiko.util.log_to_file("demo.log")
+
+username = ""
+if len(sys.argv) > 1:
+ hostname = sys.argv[1]
+ if hostname.find("@") >= 0:
+ username, hostname = hostname.split("@")
+else:
+ hostname = input("Hostname: ")
+if len(hostname) == 0:
+ print("*** Hostname required.")
+ sys.exit(1)
+port = 22
+if hostname.find(":") >= 0:
+ hostname, portstr = hostname.split(":")
+ port = int(portstr)
+
+# now connect
+try:
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.connect((hostname, port))
+except Exception as e:
+ print("*** Connect failed: " + str(e))
+ traceback.print_exc()
+ sys.exit(1)
+
+try:
+ t = paramiko.Transport(sock)
+ try:
+ t.start_client()
+ except paramiko.SSHException:
+ print("*** SSH negotiation failed.")
+ sys.exit(1)
+
+ try:
+ keys = paramiko.util.load_host_keys(
+ os.path.expanduser("~/.ssh/known_hosts")
+ )
+ except IOError:
+ try:
+ keys = paramiko.util.load_host_keys(
+ os.path.expanduser("~/ssh/known_hosts")
+ )
+ except IOError:
+ print("*** Unable to open host keys file")
+ keys = {}
+
+ # check server's host key -- this is important.
+ key = t.get_remote_server_key()
+ if hostname not in keys:
+ print("*** WARNING: Unknown host key!")
+ elif key.get_name() not in keys[hostname]:
+ print("*** WARNING: Unknown host key!")
+ elif keys[hostname][key.get_name()] != key:
+ print("*** WARNING: Host key has changed!!!")
+ sys.exit(1)
+ else:
+ print("*** Host key OK.")
+
+ # get username
+ if username == "":
+ default_username = getpass.getuser()
+ username = input("Username [%s]: " % default_username)
+ if len(username) == 0:
+ username = default_username
+
+ agent_auth(t, username)
+ if not t.is_authenticated():
+ manual_auth(username, hostname)
+ if not t.is_authenticated():
+ print("*** Authentication failed. :(")
+ t.close()
+ sys.exit(1)
+
+ chan = t.open_session()
+ chan.get_pty()
+ chan.invoke_shell()
+ print("*** Here we go!\n")
+ interactive.interactive_shell(chan)
+ chan.close()
+ t.close()
+
+except Exception as e:
+ print("*** Caught exception: " + str(e.__class__) + ": " + str(e))
+ traceback.print_exc()
+ try:
+ t.close()
+ except:
+ pass
+ sys.exit(1)
diff --git a/demos/demo_keygen.py b/demos/demo_keygen.py
new file mode 100755
index 0000000..12637ed
--- /dev/null
+++ b/demos/demo_keygen.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2010 Sofian Brabez <sbz@6dev.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+
+from binascii import hexlify
+from optparse import OptionParser
+
+from paramiko import DSSKey
+from paramiko import RSAKey
+from paramiko.ssh_exception import SSHException
+from paramiko.util import u
+
+usage = """
+%prog [-v] [-b bits] -t type [-N new_passphrase] [-f output_keyfile]"""
+
+default_values = {
+ "ktype": "dsa",
+ "bits": 1024,
+ "filename": "output",
+ "comment": "",
+}
+
+key_dispatch_table = {"dsa": DSSKey, "rsa": RSAKey}
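+
+# Example invocation (illustrative file name and passphrase):
+#   python demo_keygen.py -t rsa -b 2048 -f my_key -N secret -v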
+
+
+def progress(arg=None):
+
+ if not arg:
+ sys.stdout.write("0%\x08\x08\x08 ")
+ sys.stdout.flush()
+ elif arg[0] == "p":
+ sys.stdout.write("25%\x08\x08\x08\x08 ")
+ sys.stdout.flush()
+ elif arg[0] == "h":
+ sys.stdout.write("50%\x08\x08\x08\x08 ")
+ sys.stdout.flush()
+ elif arg[0] == "x":
+ sys.stdout.write("75%\x08\x08\x08\x08 ")
+ sys.stdout.flush()
+
+
+if __name__ == "__main__":
+
+ phrase = None
+ pfunc = None
+
+ parser = OptionParser(usage=usage)
+ parser.add_option(
+ "-t",
+ "--type",
+ type="string",
+ dest="ktype",
+ help="Specify type of key to create (dsa or rsa)",
+ metavar="ktype",
+ default=default_values["ktype"],
+ )
+ parser.add_option(
+ "-b",
+ "--bits",
+ type="int",
+ dest="bits",
+ help="Number of bits in the key to create",
+ metavar="bits",
+ default=default_values["bits"],
+ )
+ parser.add_option(
+ "-N",
+ "--new-passphrase",
+ dest="newphrase",
+ help="Provide new passphrase",
+ metavar="phrase",
+ )
+ parser.add_option(
+ "-P",
+ "--old-passphrase",
+ dest="oldphrase",
+ help="Provide old passphrase",
+ metavar="phrase",
+ )
+ parser.add_option(
+ "-f",
+ "--filename",
+ type="string",
+ dest="filename",
+ help="Filename of the key file",
+ metavar="filename",
+ default=default_values["filename"],
+ )
+ parser.add_option(
+ "-q", "--quiet", default=False, action="store_false", help="Quiet"
+ )
+ parser.add_option(
+ "-v", "--verbose", default=False, action="store_true", help="Verbose"
+ )
+ parser.add_option(
+ "-C",
+ "--comment",
+ type="string",
+ dest="comment",
+ help="Provide a new comment",
+ metavar="comment",
+ default=default_values["comment"],
+ )
+
+ (options, args) = parser.parse_args()
+
+ if len(sys.argv) == 1:
+ parser.print_help()
+ sys.exit(0)
+
+ for o in list(default_values.keys()):
+ globals()[o] = getattr(options, o, default_values[o.lower()])
+
+ if options.newphrase:
+ phrase = getattr(options, "newphrase")
+
+ if options.verbose:
+ pfunc = progress
+ sys.stdout.write(
+ "Generating priv/pub %s %d bits key pair (%s/%s.pub)..."
+ % (ktype, bits, filename, filename)
+ )
+ sys.stdout.flush()
+
+ if ktype == "dsa" and bits > 1024:
+        raise SSHException("DSA keys must be 1024 bits")
+
+ if ktype not in key_dispatch_table:
+ raise SSHException(
+ "Unknown %s algorithm to generate keys pair" % ktype
+ )
+
+ # generating private key
+ prv = key_dispatch_table[ktype].generate(bits=bits, progress_func=pfunc)
+ prv.write_private_key_file(filename, password=phrase)
+
+ # generating public key
+ pub = key_dispatch_table[ktype](filename=filename, password=phrase)
+ with open("%s.pub" % filename, "w") as f:
+ f.write("%s %s" % (pub.get_name(), pub.get_base64()))
+ if options.comment:
+ f.write(" %s" % comment)
+
+ if options.verbose:
+ print("done.")
+
+ hash = u(hexlify(pub.get_fingerprint()))
+ print(
+ "Fingerprint: %d %s %s.pub (%s)"
+ % (
+ bits,
+ ":".join([hash[i : 2 + i] for i in range(0, len(hash), 2)]),
+ filename,
+ ktype.upper(),
+ )
+ )
diff --git a/demos/demo_server.py b/demos/demo_server.py
new file mode 100644
index 0000000..6cb2dc5
--- /dev/null
+++ b/demos/demo_server.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import base64
+from binascii import hexlify
+import os
+import socket
+import sys
+import threading
+import traceback
+
+import paramiko
+from paramiko.util import u
+from base64 import decodebytes
+
+
+# setup logging
+paramiko.util.log_to_file("demo_server.log")
+
+host_key = paramiko.RSAKey(filename="test_rsa.key")
+# host_key = paramiko.DSSKey(filename='test_dss.key')
+
+print("Read key: " + u(hexlify(host_key.get_fingerprint())))
+
+
+class Server(paramiko.ServerInterface):
+ # 'data' is the output of base64.b64encode(key)
+ # (using the "user_rsa_key" files)
+ data = (
+ b"AAAAB3NzaC1yc2EAAAABIwAAAIEAyO4it3fHlmGZWJaGrfeHOVY7RWO3P9M7hp"
+ b"fAu7jJ2d7eothvfeuoRFtJwhUmZDluRdFyhFY/hFAh76PJKGAusIqIQKlkJxMC"
+ b"KDqIexkgHAfID/6mqvmnSJf0b5W8v5h2pI/stOSwTQ+pxVhwJ9ctYDhRSlF0iT"
+ b"UWT10hcuO4Ks8="
+ )
+ good_pub_key = paramiko.RSAKey(data=decodebytes(data))
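+    # The blob above can be regenerated from the demo key with, roughly:
+    #   paramiko.RSAKey(filename="user_rsa_key").get_base64()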
+
+ def __init__(self):
+ self.event = threading.Event()
+
+ def check_channel_request(self, kind, chanid):
+ if kind == "session":
+ return paramiko.OPEN_SUCCEEDED
+ return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
+
+ def check_auth_password(self, username, password):
+ if (username == "robey") and (password == "foo"):
+ return paramiko.AUTH_SUCCESSFUL
+ return paramiko.AUTH_FAILED
+
+ def check_auth_publickey(self, username, key):
+ print("Auth attempt with key: " + u(hexlify(key.get_fingerprint())))
+ if (username == "robey") and (key == self.good_pub_key):
+ return paramiko.AUTH_SUCCESSFUL
+ return paramiko.AUTH_FAILED
+
+ def check_auth_gssapi_with_mic(
+ self, username, gss_authenticated=paramiko.AUTH_FAILED, cc_file=None
+ ):
+ """
+ .. note::
+ We are just checking in `AuthHandler` that the given user is a
+ valid krb5 principal! We don't check if the krb5 principal is
+ allowed to log in on the server, because there is no way to do that
+ in python. So if you develop your own SSH server with paramiko for
+ a certain platform like Linux, you should call ``krb5_kuserok()`` in
+ your local kerberos library to make sure that the krb5_principal
+ has an account on the server and is allowed to log in as a user.
+
+ .. seealso::
+ `krb5_kuserok() man page
+ <http://www.unix.com/man-page/all/3/krb5_kuserok/>`_
+ """
+ if gss_authenticated == paramiko.AUTH_SUCCESSFUL:
+ return paramiko.AUTH_SUCCESSFUL
+ return paramiko.AUTH_FAILED
+
+ def check_auth_gssapi_keyex(
+ self, username, gss_authenticated=paramiko.AUTH_FAILED, cc_file=None
+ ):
+ if gss_authenticated == paramiko.AUTH_SUCCESSFUL:
+ return paramiko.AUTH_SUCCESSFUL
+ return paramiko.AUTH_FAILED
+
+ def enable_auth_gssapi(self):
+ return True
+
+ def get_allowed_auths(self, username):
+ return "gssapi-keyex,gssapi-with-mic,password,publickey"
+
+ def check_channel_shell_request(self, channel):
+ self.event.set()
+ return True
+
+ def check_channel_pty_request(
+ self, channel, term, width, height, pixelwidth, pixelheight, modes
+ ):
+ return True
+
+
+DoGSSAPIKeyExchange = True
+
+# now connect
+try:
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.bind(("", 2200))
+except Exception as e:
+ print("*** Bind failed: " + str(e))
+ traceback.print_exc()
+ sys.exit(1)
+
+try:
+ sock.listen(100)
+ print("Listening for connection ...")
+ client, addr = sock.accept()
+except Exception as e:
+ print("*** Listen/accept failed: " + str(e))
+ traceback.print_exc()
+ sys.exit(1)
+
+print("Got a connection!")
+
+try:
+ t = paramiko.Transport(client, gss_kex=DoGSSAPIKeyExchange)
+ t.set_gss_host(socket.getfqdn(""))
+ try:
+ t.load_server_moduli()
+ except:
+ print("(Failed to load moduli -- gex will be unsupported.)")
+ raise
+ t.add_server_key(host_key)
+ server = Server()
+ try:
+ t.start_server(server=server)
+ except paramiko.SSHException:
+ print("*** SSH negotiation failed.")
+ sys.exit(1)
+
+ # wait for auth
+ chan = t.accept(20)
+ if chan is None:
+ print("*** No channel.")
+ sys.exit(1)
+ print("Authenticated!")
+
+ server.event.wait(10)
+ if not server.event.is_set():
+ print("*** Client never asked for a shell.")
+ sys.exit(1)
+
+ chan.send("\r\n\r\nWelcome to my dorky little BBS!\r\n\r\n")
+ chan.send(
+ "We are on fire all the time! Hooray! Candy corn for everyone!\r\n"
+ )
+ chan.send("Happy birthday to Robot Dave!\r\n\r\n")
+ chan.send("Username: ")
+ f = chan.makefile("rU")
+ username = f.readline().strip("\r\n")
+ chan.send("\r\nI don't like you, " + username + ".\r\n")
+ chan.close()
+
+except Exception as e:
+ print("*** Caught exception: " + str(e.__class__) + ": " + str(e))
+ traceback.print_exc()
+ try:
+ t.close()
+ except:
+ pass
+ sys.exit(1)
diff --git a/demos/demo_sftp.py b/demos/demo_sftp.py
new file mode 100644
index 0000000..dbcb2cb
--- /dev/null
+++ b/demos/demo_sftp.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# based on code provided by raymond mosteller (thanks!)
+
+import base64
+import getpass
+import os
+import socket
+import sys
+import traceback
+
+import paramiko
+
+
+# setup logging
+paramiko.util.log_to_file("demo_sftp.log")
+
+# Paramiko client configuration
+UseGSSAPI = True # enable GSS-API / SSPI authentication
+DoGSSAPIKeyExchange = True
+Port = 22
+
+# get hostname
+username = ""
+if len(sys.argv) > 1:
+ hostname = sys.argv[1]
+ if hostname.find("@") >= 0:
+ username, hostname = hostname.split("@")
+else:
+ hostname = input("Hostname: ")
+if len(hostname) == 0:
+ print("*** Hostname required.")
+ sys.exit(1)
+
+if hostname.find(":") >= 0:
+ hostname, portstr = hostname.split(":")
+ Port = int(portstr)
+
+
+# get username
+if username == "":
+ default_username = getpass.getuser()
+ username = input("Username [%s]: " % default_username)
+ if len(username) == 0:
+ username = default_username
+if not UseGSSAPI:
+ password = getpass.getpass("Password for %s@%s: " % (username, hostname))
+else:
+ password = None
+
+
+# get host key, if we know one
+hostkeytype = None
+hostkey = None
+try:
+ host_keys = paramiko.util.load_host_keys(
+ os.path.expanduser("~/.ssh/known_hosts")
+ )
+except IOError:
+ try:
+        # also try ~/ssh/, for older Windows setups without a ~/.ssh/ folder
+ host_keys = paramiko.util.load_host_keys(
+ os.path.expanduser("~/ssh/known_hosts")
+ )
+ except IOError:
+ print("*** Unable to open host keys file")
+ host_keys = {}
+
+if hostname in host_keys:
+ hostkeytype = host_keys[hostname].keys()[0]
+ hostkey = host_keys[hostname][hostkeytype]
+ print("Using host key of type %s" % hostkeytype)
+
+
+# now, connect and use paramiko Transport to negotiate SSH2 across the connection
+try:
+ t = paramiko.Transport((hostname, Port))
+ t.connect(
+ hostkey,
+ username,
+ password,
+ gss_host=socket.getfqdn(hostname),
+ gss_auth=UseGSSAPI,
+ gss_kex=DoGSSAPIKeyExchange,
+ )
+ sftp = paramiko.SFTPClient.from_transport(t)
+
+ # dirlist on remote host
+ dirlist = sftp.listdir(".")
+ print("Dirlist: %s" % dirlist)
+
+ # copy this demo onto the server
+ try:
+ sftp.mkdir("demo_sftp_folder")
+ except IOError:
+ print("(assuming demo_sftp_folder/ already exists)")
+ with sftp.open("demo_sftp_folder/README", "w") as f:
+ f.write("This was created by demo_sftp.py.\n")
+ with open("demo_sftp.py", "r") as f:
+ data = f.read()
+ sftp.open("demo_sftp_folder/demo_sftp.py", "w").write(data)
+ print("created demo_sftp_folder/ on the server")
+
+ # copy the README back here
+ with sftp.open("demo_sftp_folder/README", "r") as f:
+ data = f.read()
+ with open("README_demo_sftp", "w") as f:
+ f.write(data)
+ print("copied README back here")
+
+ # BETTER: use the get() and put() methods
+ sftp.put("demo_sftp.py", "demo_sftp_folder/demo_sftp.py")
+ sftp.get("demo_sftp_folder/README", "README_demo_sftp")
+
+ t.close()
+
+except Exception as e:
+ print("*** Caught exception: %s: %s" % (e.__class__, e))
+ traceback.print_exc()
+ try:
+ t.close()
+ except:
+ pass
+ sys.exit(1)
diff --git a/demos/demo_simple.py b/demos/demo_simple.py
new file mode 100644
index 0000000..bd932c3
--- /dev/null
+++ b/demos/demo_simple.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import base64
+import getpass
+import os
+import socket
+import sys
+import traceback
+
+import paramiko
+
+try:
+ import interactive
+except ImportError:
+ from . import interactive
+
+
+# setup logging
+paramiko.util.log_to_file("demo_simple.log")
+# Paramiko client configuration
+UseGSSAPI = (
+ paramiko.GSS_AUTH_AVAILABLE
+) # enable "gssapi-with-mic" authentication, if supported by your python installation
+DoGSSAPIKeyExchange = (
+ paramiko.GSS_AUTH_AVAILABLE
+) # enable "gssapi-kex" key exchange, if supported by your python installation
+# UseGSSAPI = False
+# DoGSSAPIKeyExchange = False
+port = 22
+
+# get hostname
+username = ""
+if len(sys.argv) > 1:
+ hostname = sys.argv[1]
+ if hostname.find("@") >= 0:
+ username, hostname = hostname.split("@")
+else:
+ hostname = input("Hostname: ")
+if len(hostname) == 0:
+ print("*** Hostname required.")
+ sys.exit(1)
+
+if hostname.find(":") >= 0:
+ hostname, portstr = hostname.split(":")
+ port = int(portstr)
+
+
+# get username
+if username == "":
+ default_username = getpass.getuser()
+ username = input("Username [%s]: " % default_username)
+ if len(username) == 0:
+ username = default_username
+if not UseGSSAPI and not DoGSSAPIKeyExchange:
+ password = getpass.getpass("Password for %s@%s: " % (username, hostname))
+
+
+# now, connect and use paramiko Client to negotiate SSH2 across the connection
+try:
+ client = paramiko.SSHClient()
+ client.load_system_host_keys()
+ client.set_missing_host_key_policy(paramiko.WarningPolicy())
+ print("*** Connecting...")
+ if not UseGSSAPI and not DoGSSAPIKeyExchange:
+ client.connect(hostname, port, username, password)
+ else:
+ try:
+ client.connect(
+ hostname,
+ port,
+ username,
+ gss_auth=UseGSSAPI,
+ gss_kex=DoGSSAPIKeyExchange,
+ )
+ except Exception:
+ # traceback.print_exc()
+ password = getpass.getpass(
+ "Password for %s@%s: " % (username, hostname)
+ )
+ client.connect(hostname, port, username, password)
+
+ chan = client.invoke_shell()
+ print(repr(client.get_transport()))
+ print("*** Here we go!\n")
+ interactive.interactive_shell(chan)
+ chan.close()
+ client.close()
+
+except Exception as e:
+ print("*** Caught exception: %s: %s" % (e.__class__, e))
+ traceback.print_exc()
+ try:
+ client.close()
+ except:
+ pass
+ sys.exit(1)
diff --git a/demos/forward.py b/demos/forward.py
new file mode 100644
index 0000000..869e390
--- /dev/null
+++ b/demos/forward.py
@@ -0,0 +1,257 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Sample script showing how to do local port forwarding over paramiko.
+
+This script connects to the requested SSH server and sets up local port
+forwarding (the openssh -L option) from a local port through a tunneled
+connection to a destination reachable from the SSH server machine.
+"""
+
+import getpass
+import os
+import socket
+import select
+
+try:
+ import SocketServer
+except ImportError:
+ import socketserver as SocketServer
+
+import sys
+from optparse import OptionParser
+
+import paramiko
+
+SSH_PORT = 22
+DEFAULT_PORT = 4000
+
+g_verbose = True
+
+
+class ForwardServer(SocketServer.ThreadingTCPServer):
+ daemon_threads = True
+ allow_reuse_address = True
+
+
+class Handler(SocketServer.BaseRequestHandler):
+ def handle(self):
+ try:
+ chan = self.ssh_transport.open_channel(
+ "direct-tcpip",
+ (self.chain_host, self.chain_port),
+ self.request.getpeername(),
+ )
+ except Exception as e:
+ verbose(
+ "Incoming request to %s:%d failed: %s"
+ % (self.chain_host, self.chain_port, repr(e))
+ )
+ return
+ if chan is None:
+ verbose(
+ "Incoming request to %s:%d was rejected by the SSH server."
+ % (self.chain_host, self.chain_port)
+ )
+ return
+
+ verbose(
+ "Connected! Tunnel open %r -> %r -> %r"
+ % (
+ self.request.getpeername(),
+ chan.getpeername(),
+ (self.chain_host, self.chain_port),
+ )
+ )
+ while True:
+ r, w, x = select.select([self.request, chan], [], [])
+ if self.request in r:
+ data = self.request.recv(1024)
+ if len(data) == 0:
+ break
+ chan.send(data)
+ if chan in r:
+ data = chan.recv(1024)
+ if len(data) == 0:
+ break
+ self.request.send(data)
+
+ peername = self.request.getpeername()
+ chan.close()
+ self.request.close()
+ verbose("Tunnel closed from %r" % (peername,))
+
+
+def forward_tunnel(local_port, remote_host, remote_port, transport):
+ # this is a little convoluted, but lets me configure things for the Handler
+ # object. (SocketServer doesn't give Handlers any way to access the outer
+ # server normally.)
+    class SubHandler(Handler):
+ chain_host = remote_host
+ chain_port = remote_port
+ ssh_transport = transport
+
+    ForwardServer(("", local_port), SubHandler).serve_forever()
+
+
+def verbose(s):
+ if g_verbose:
+ print(s)
+
+
+HELP = """\
+Set up a forward tunnel across an SSH server, using paramiko. A local port
+(given with -p) is forwarded across an SSH session to an address:port from
+the SSH server. This is similar to the openssh -L option.
+"""
+
+
+def get_host_port(spec, default_port):
+ "parse 'hostname:22' into a host and port, with the port optional"
+ args = (spec.split(":", 1) + [default_port])[:2]
+ args[1] = int(args[1])
+ return args[0], args[1]
+
+
+def parse_options():
+ global g_verbose
+
+ parser = OptionParser(
+ usage="usage: %prog [options] <ssh-server>[:<server-port>]",
+ version="%prog 1.0",
+ description=HELP,
+ )
+ parser.add_option(
+ "-q",
+ "--quiet",
+ action="store_false",
+ dest="verbose",
+ default=True,
+ help="squelch all informational output",
+ )
+ parser.add_option(
+ "-p",
+ "--local-port",
+ action="store",
+ type="int",
+ dest="port",
+ default=DEFAULT_PORT,
+ help="local port to forward (default: %d)" % DEFAULT_PORT,
+ )
+ parser.add_option(
+ "-u",
+ "--user",
+ action="store",
+ type="string",
+ dest="user",
+ default=getpass.getuser(),
+ help="username for SSH authentication (default: %s)"
+ % getpass.getuser(),
+ )
+ parser.add_option(
+ "-K",
+ "--key",
+ action="store",
+ type="string",
+ dest="keyfile",
+ default=None,
+ help="private key file to use for SSH authentication",
+ )
+ parser.add_option(
+ "",
+ "--no-key",
+ action="store_false",
+ dest="look_for_keys",
+ default=True,
+ help="don't look for or use a private key file",
+ )
+ parser.add_option(
+ "-P",
+ "--password",
+ action="store_true",
+ dest="readpass",
+ default=False,
+ help="read password (for key or password auth) from stdin",
+ )
+ parser.add_option(
+ "-r",
+ "--remote",
+ action="store",
+ type="string",
+ dest="remote",
+ default=None,
+ metavar="host:port",
+ help="remote host and port to forward to",
+ )
+ options, args = parser.parse_args()
+
+ if len(args) != 1:
+ parser.error("Incorrect number of arguments.")
+ if options.remote is None:
+ parser.error("Remote address required (-r).")
+
+ g_verbose = options.verbose
+ server_host, server_port = get_host_port(args[0], SSH_PORT)
+ remote_host, remote_port = get_host_port(options.remote, SSH_PORT)
+ return options, (server_host, server_port), (remote_host, remote_port)
+
+
+def main():
+ options, server, remote = parse_options()
+
+ password = None
+ if options.readpass:
+ password = getpass.getpass("Enter SSH password: ")
+
+ client = paramiko.SSHClient()
+ client.load_system_host_keys()
+ client.set_missing_host_key_policy(paramiko.WarningPolicy())
+
+ verbose("Connecting to ssh host %s:%d ..." % (server[0], server[1]))
+ try:
+ client.connect(
+ server[0],
+ server[1],
+ username=options.user,
+ key_filename=options.keyfile,
+ look_for_keys=options.look_for_keys,
+ password=password,
+ )
+ except Exception as e:
+ print("*** Failed to connect to %s:%d: %r" % (server[0], server[1], e))
+ sys.exit(1)
+
+ verbose(
+ "Now forwarding port %d to %s:%d ..."
+ % (options.port, remote[0], remote[1])
+ )
+
+ try:
+ forward_tunnel(
+ options.port, remote[0], remote[1], client.get_transport()
+ )
+ except KeyboardInterrupt:
+ print("C-c: Port forwarding stopped.")
+ sys.exit(0)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/demos/interactive.py b/demos/interactive.py
new file mode 100644
index 0000000..16eae0e
--- /dev/null
+++ b/demos/interactive.py
@@ -0,0 +1,101 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import socket
+import sys
+from paramiko.util import u
+
+# windows does not have termios...
+try:
+ import termios
+ import tty
+
+ has_termios = True
+except ImportError:
+ has_termios = False
+
+
+def interactive_shell(chan):
+ if has_termios:
+ posix_shell(chan)
+ else:
+ windows_shell(chan)
+
+
+def posix_shell(chan):
+ import select
+
+ oldtty = termios.tcgetattr(sys.stdin)
+ try:
+ tty.setraw(sys.stdin.fileno())
+ tty.setcbreak(sys.stdin.fileno())
+ chan.settimeout(0.0)
+
+ while True:
+ r, w, e = select.select([chan, sys.stdin], [], [])
+ if chan in r:
+ try:
+ x = u(chan.recv(1024))
+ if len(x) == 0:
+ sys.stdout.write("\r\n*** EOF\r\n")
+ break
+ sys.stdout.write(x)
+ sys.stdout.flush()
+ except socket.timeout:
+ pass
+ if sys.stdin in r:
+ x = sys.stdin.read(1)
+ if len(x) == 0:
+ break
+ chan.send(x)
+
+ finally:
+ termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
+
+
+# thanks to Mike Looijmans for this code
+def windows_shell(chan):
+ import threading
+
+ sys.stdout.write(
+ "Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n"
+ )
+
+ def writeall(sock):
+ while True:
+ data = sock.recv(256)
+ if not data:
+ sys.stdout.write("\r\n*** EOF ***\r\n\r\n")
+ sys.stdout.flush()
+ break
+            sys.stdout.write(u(data))
+ sys.stdout.flush()
+
+ writer = threading.Thread(target=writeall, args=(chan,))
+ writer.start()
+
+ try:
+ while True:
+ d = sys.stdin.read(1)
+ if not d:
+ break
+ chan.send(d)
+ except EOFError:
+ # user hit ^Z or F6
+ pass
diff --git a/demos/rforward.py b/demos/rforward.py
new file mode 100755
index 0000000..200634a
--- /dev/null
+++ b/demos/rforward.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2008 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Sample script showing how to do remote port forwarding over paramiko.
+
+This script connects to the requested SSH server and sets up remote port
+forwarding (the openssh -R option) from a remote port through a tunneled
+connection to a destination reachable from the local machine.
+"""
+
+import getpass
+import os
+import socket
+import select
+import sys
+import threading
+from optparse import OptionParser
+
+import paramiko
+
+SSH_PORT = 22
+DEFAULT_PORT = 4000
+
+g_verbose = True
+
+
+def handler(chan, host, port):
+ sock = socket.socket()
+ try:
+ sock.connect((host, port))
+ except Exception as e:
+ verbose("Forwarding request to %s:%d failed: %r" % (host, port, e))
+ return
+
+ verbose(
+ "Connected! Tunnel open %r -> %r -> %r"
+ % (chan.origin_addr, chan.getpeername(), (host, port))
+ )
+ while True:
+ r, w, x = select.select([sock, chan], [], [])
+ if sock in r:
+ data = sock.recv(1024)
+ if len(data) == 0:
+ break
+ chan.send(data)
+ if chan in r:
+ data = chan.recv(1024)
+ if len(data) == 0:
+ break
+ sock.send(data)
+ chan.close()
+ sock.close()
+ verbose("Tunnel closed from %r" % (chan.origin_addr,))
+
+
+def reverse_forward_tunnel(server_port, remote_host, remote_port, transport):
+ transport.request_port_forward("", server_port)
+ while True:
+ chan = transport.accept(1000)
+ if chan is None:
+ continue
+ thr = threading.Thread(
+ target=handler, args=(chan, remote_host, remote_port)
+ )
+        thr.daemon = True
+ thr.start()
+
+
+def verbose(s):
+ if g_verbose:
+ print(s)
+
+
+HELP = """\
+Set up a reverse forwarding tunnel across an SSH server, using paramiko. A
+port on the SSH server (given with -p) is forwarded across an SSH session
+back to the local machine, and out to a remote site reachable from this
+network. This is similar to the openssh -R option.
+"""
+
+
+def get_host_port(spec, default_port):
+ "parse 'hostname:22' into a host and port, with the port optional"
+ args = (spec.split(":", 1) + [default_port])[:2]
+ args[1] = int(args[1])
+ return args[0], args[1]
+
+
+def parse_options():
+ global g_verbose
+
+ parser = OptionParser(
+ usage="usage: %prog [options] <ssh-server>[:<server-port>]",
+ version="%prog 1.0",
+ description=HELP,
+ )
+ parser.add_option(
+ "-q",
+ "--quiet",
+ action="store_false",
+ dest="verbose",
+ default=True,
+ help="squelch all informational output",
+ )
+ parser.add_option(
+ "-p",
+ "--remote-port",
+ action="store",
+ type="int",
+ dest="port",
+ default=DEFAULT_PORT,
+ help="port on server to forward (default: %d)" % DEFAULT_PORT,
+ )
+ parser.add_option(
+ "-u",
+ "--user",
+ action="store",
+ type="string",
+ dest="user",
+ default=getpass.getuser(),
+ help="username for SSH authentication (default: %s)"
+ % getpass.getuser(),
+ )
+ parser.add_option(
+ "-K",
+ "--key",
+ action="store",
+ type="string",
+ dest="keyfile",
+ default=None,
+ help="private key file to use for SSH authentication",
+ )
+ parser.add_option(
+ "",
+ "--no-key",
+ action="store_false",
+ dest="look_for_keys",
+ default=True,
+ help="don't look for or use a private key file",
+ )
+ parser.add_option(
+ "-P",
+ "--password",
+ action="store_true",
+ dest="readpass",
+ default=False,
+ help="read password (for key or password auth) from stdin",
+ )
+ parser.add_option(
+ "-r",
+ "--remote",
+ action="store",
+ type="string",
+ dest="remote",
+ default=None,
+ metavar="host:port",
+ help="remote host and port to forward to",
+ )
+ options, args = parser.parse_args()
+
+ if len(args) != 1:
+ parser.error("Incorrect number of arguments.")
+ if options.remote is None:
+ parser.error("Remote address required (-r).")
+
+ g_verbose = options.verbose
+ server_host, server_port = get_host_port(args[0], SSH_PORT)
+ remote_host, remote_port = get_host_port(options.remote, SSH_PORT)
+ return options, (server_host, server_port), (remote_host, remote_port)
+
+
+def main():
+ options, server, remote = parse_options()
+
+ password = None
+ if options.readpass:
+ password = getpass.getpass("Enter SSH password: ")
+
+ client = paramiko.SSHClient()
+ client.load_system_host_keys()
+ client.set_missing_host_key_policy(paramiko.WarningPolicy())
+
+ verbose("Connecting to ssh host %s:%d ..." % (server[0], server[1]))
+ try:
+ client.connect(
+ server[0],
+ server[1],
+ username=options.user,
+ key_filename=options.keyfile,
+ look_for_keys=options.look_for_keys,
+ password=password,
+ )
+ except Exception as e:
+ print("*** Failed to connect to %s:%d: %r" % (server[0], server[1], e))
+ sys.exit(1)
+
+ verbose(
+ "Now forwarding remote port %d to %s:%d ..."
+ % (options.port, remote[0], remote[1])
+ )
+
+ try:
+ reverse_forward_tunnel(
+ options.port, remote[0], remote[1], client.get_transport()
+ )
+ except KeyboardInterrupt:
+ print("C-c: Port forwarding stopped.")
+ sys.exit(0)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/demos/test_rsa.key b/demos/test_rsa.key
new file mode 100644
index 0000000..f50e9c5
--- /dev/null
+++ b/demos/test_rsa.key
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICWgIBAAKBgQDTj1bqB4WmayWNPB+8jVSYpZYk80Ujvj680pOTh2bORBjbIAyz
+oWGW+GUjzKxTiiPvVmxFgx5wdsFvF03v34lEVVhMpouqPAYQ15N37K/ir5XY+9m/
+d8ufMCkjeXsQkKqFbAlQcnWMCRnOoPHS3I4vi6hmnDDeeYTSRvfLbW0fhwIBIwKB
+gBIiOqZYaoqbeD9OS9z2K9KR2atlTxGxOJPXiP4ESqP3NVScWNwyZ3NXHpyrJLa0
+EbVtzsQhLn6rF+TzXnOlcipFvjsem3iYzCpuChfGQ6SovTcOjHV9z+hnpXvQ/fon
+soVRZY65wKnF7IAoUwTmJS9opqgrN6kRgCd3DASAMd1bAkEA96SBVWFt/fJBNJ9H
+tYnBKZGw0VeHOYmVYbvMSstssn8un+pQpUm9vlG/bp7Oxd/m+b9KWEh2xPfv6zqU
+avNwHwJBANqzGZa/EpzF4J8pGti7oIAPUIDGMtfIcmqNXVMckrmzQ2vTfqtkEZsA
+4rE1IERRyiJQx6EJsz21wJmGV9WJQ5kCQQDwkS0uXqVdFzgHO6S++tjmjYcxwr3g
+H0CoFYSgbddOT6miqRskOQF3DZVkJT3kyuBgU2zKygz52ukQZMqxCb1fAkASvuTv
+qfpH87Qq5kQhNKdbbwbmd2NxlNabazPijWuphGTdW0VfJdWfklyS2Kr+iqrs/5wV
+HhathJt636Eg7oIjAkA8ht3MQ+XSl9yIJIS8gVpbPxSw5OMfw0PjVE7tBdQruiSc
+nvuQES5C9BMHjF39LZiGH1iLQy7FgdHyoP+eodI7
+-----END RSA PRIVATE KEY-----
diff --git a/demos/user_rsa_key b/demos/user_rsa_key
new file mode 100644
index 0000000..ee64f23
--- /dev/null
+++ b/demos/user_rsa_key
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXQIBAAKBgQDI7iK3d8eWYZlYloat94c5VjtFY7c/0zuGl8C7uMnZ3t6i2G99
+66hEW0nCFSZkOW5F0XKEVj+EUCHvo8koYC6wiohAqWQnEwIoOoh7GSAcB8gP/qaq
++adIl/Rvlby/mHakj+y05LBND6nFWHAn1y1gOFFKUXSJNRZPXSFy47gqzwIBIwKB
+gQCbANjz7q/pCXZLp1Hz6tYHqOvlEmjK1iabB1oqafrMpJ0eibUX/u+FMHq6StR5
+M5413BaDWHokPdEJUnabfWXXR3SMlBUKrck0eAer1O8m78yxu3OEdpRk+znVo4DL
+guMeCdJB/qcF0kEsx+Q8HP42MZU1oCmk3PbfXNFwaHbWuwJBAOQ/ry/hLD7AqB8x
+DmCM82A9E59ICNNlHOhxpJoh6nrNTPCsBAEu/SmqrL8mS6gmbRKUaya5Lx1pkxj2
+s/kWOokCQQDhXCcYXjjWiIfxhl6Rlgkk1vmI0l6785XSJNv4P7pXjGmShXfIzroh
+S8uWK3tL0GELY7+UAKDTUEVjjQdGxYSXAkEA3bo1JzKCwJ3lJZ1ebGuqmADRO6UP
+40xH977aadfN1mEI6cusHmgpISl0nG5YH7BMsvaT+bs1FUH8m+hXDzoqOwJBAK3Z
+X/za+KV/REya2z0b+GzgWhkXUGUa/owrEBdHGriQ47osclkUgPUdNqcLmaDilAF4
+1Z4PHPrI5RJIONAx+JECQQC/fChqjBgFpk6iJ+BOdSexQpgfxH/u/457W10Y43HR
+soS+8btbHqjQkowQ/2NTlUfWvqIlfxs6ZbFsIp/HrhZL
+-----END RSA PRIVATE KEY-----
diff --git a/demos/user_rsa_key.pub b/demos/user_rsa_key.pub
new file mode 100644
index 0000000..ac722f1
--- /dev/null
+++ b/demos/user_rsa_key.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAyO4it3fHlmGZWJaGrfeHOVY7RWO3P9M7hpfAu7jJ2d7eothvfeuoRFtJwhUmZDluRdFyhFY/hFAh76PJKGAusIqIQKlkJxMCKDqIexkgHAfID/6mqvmnSJf0b5W8v5h2pI/stOSwTQ+pxVhwJ9ctYDhRSlF0iTUWT10hcuO4Ks8= robey@ralph.lag.net
diff --git a/dev-requirements.txt b/dev-requirements.txt
new file mode 100644
index 0000000..43d01e0
--- /dev/null
+++ b/dev-requirements.txt
@@ -0,0 +1,24 @@
+# Invocations for common project tasks
+invoke>=2.0
+invocations>=3.2
+# Testing!
+pytest-relaxed>=2
+# pytest-xdist for test dir watching and the inv guard task
+pytest-xdist>=3
+# Linting!
+flake8>=4,<5
+# Formatting!
+black>=22.8,<22.9
+# Spelling!
+# TODO Python 3.7: newer codespell has upgraded lists
+codespell>=2.2.1,<2.3
+# Coverage!
+coverage>=6.2,<7
+# Documentation tools
+alabaster==0.7.13
+releases>=2.1
+watchdog<2
+# Debuggery
+icecream>=2.1
+# Self (sans GSS which is a pain to bother with most of the time)
+-e ".[invoke]"
diff --git a/images/paramiko-banner.png b/images/paramiko-banner.png
new file mode 100644
index 0000000..e69fbc6
--- /dev/null
+++ b/images/paramiko-banner.png
Binary files differ
diff --git a/images/paramiko-banner.psd b/images/paramiko-banner.psd
new file mode 100644
index 0000000..20ebbc8
--- /dev/null
+++ b/images/paramiko-banner.psd
Binary files differ
diff --git a/images/paramiko.png b/images/paramiko.png
new file mode 100644
index 0000000..c488e2e
--- /dev/null
+++ b/images/paramiko.png
Binary files differ
diff --git a/paramiko/__init__.py b/paramiko/__init__.py
new file mode 100644
index 0000000..65148d2
--- /dev/null
+++ b/paramiko/__init__.py
@@ -0,0 +1,165 @@
+# Copyright (C) 2003-2011 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# flake8: noqa
+import sys
+from paramiko._version import __version__, __version_info__
+from paramiko.transport import (
+ SecurityOptions,
+ ServiceRequestingTransport,
+ Transport,
+)
+from paramiko.client import (
+ AutoAddPolicy,
+ MissingHostKeyPolicy,
+ RejectPolicy,
+ SSHClient,
+ WarningPolicy,
+)
+from paramiko.auth_handler import AuthHandler
+from paramiko.auth_strategy import (
+ AuthFailure,
+ AuthStrategy,
+ AuthResult,
+ AuthSource,
+ InMemoryPrivateKey,
+ NoneAuth,
+ OnDiskPrivateKey,
+ Password,
+ PrivateKey,
+ SourceResult,
+)
+from paramiko.ssh_gss import GSSAuth, GSS_AUTH_AVAILABLE, GSS_EXCEPTIONS
+from paramiko.channel import (
+ Channel,
+ ChannelFile,
+ ChannelStderrFile,
+ ChannelStdinFile,
+)
+from paramiko.ssh_exception import (
+ AuthenticationException,
+ BadAuthenticationType,
+ BadHostKeyException,
+ ChannelException,
+ ConfigParseError,
+ CouldNotCanonicalize,
+ IncompatiblePeer,
+ MessageOrderError,
+ PasswordRequiredException,
+ ProxyCommandFailure,
+ SSHException,
+)
+from paramiko.server import ServerInterface, SubsystemHandler, InteractiveQuery
+from paramiko.rsakey import RSAKey
+from paramiko.dsskey import DSSKey
+from paramiko.ecdsakey import ECDSAKey
+from paramiko.ed25519key import Ed25519Key
+from paramiko.sftp import SFTPError, BaseSFTP
+from paramiko.sftp_client import SFTP, SFTPClient
+from paramiko.sftp_server import SFTPServer
+from paramiko.sftp_attr import SFTPAttributes
+from paramiko.sftp_handle import SFTPHandle
+from paramiko.sftp_si import SFTPServerInterface
+from paramiko.sftp_file import SFTPFile
+from paramiko.message import Message
+from paramiko.packet import Packetizer
+from paramiko.file import BufferedFile
+from paramiko.agent import Agent, AgentKey
+from paramiko.pkey import PKey, PublicBlob, UnknownKeyType
+from paramiko.hostkeys import HostKeys
+from paramiko.config import SSHConfig, SSHConfigDict
+from paramiko.proxy import ProxyCommand
+
+from paramiko.common import (
+ AUTH_SUCCESSFUL,
+ AUTH_PARTIALLY_SUCCESSFUL,
+ AUTH_FAILED,
+ OPEN_SUCCEEDED,
+ OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
+ OPEN_FAILED_CONNECT_FAILED,
+ OPEN_FAILED_UNKNOWN_CHANNEL_TYPE,
+ OPEN_FAILED_RESOURCE_SHORTAGE,
+)
+
+from paramiko.sftp import (
+ SFTP_OK,
+ SFTP_EOF,
+ SFTP_NO_SUCH_FILE,
+ SFTP_PERMISSION_DENIED,
+ SFTP_FAILURE,
+ SFTP_BAD_MESSAGE,
+ SFTP_NO_CONNECTION,
+ SFTP_CONNECTION_LOST,
+ SFTP_OP_UNSUPPORTED,
+)
+
+from paramiko.common import io_sleep
+
+
+# TODO: I guess a real plugin system might be nice for future expansion...
+key_classes = [DSSKey, RSAKey, Ed25519Key, ECDSAKey]
+
+
+__author__ = "Jeff Forcier <jeff@bitprophet.org>"
+__license__ = "GNU Lesser General Public License (LGPL)"
+
+# TODO 4.0: remove this, jeez
+__all__ = [
+ "Agent",
+ "AgentKey",
+ "AuthenticationException",
+ "AutoAddPolicy",
+ "BadAuthenticationType",
+ "BadHostKeyException",
+ "BufferedFile",
+ "Channel",
+ "ChannelException",
+ "ConfigParseError",
+ "CouldNotCanonicalize",
+ "DSSKey",
+ "ECDSAKey",
+ "Ed25519Key",
+ "HostKeys",
+ "Message",
+ "MissingHostKeyPolicy",
+ "PKey",
+ "PasswordRequiredException",
+ "ProxyCommand",
+ "ProxyCommandFailure",
+ "RSAKey",
+ "RejectPolicy",
+ "SFTP",
+ "SFTPAttributes",
+ "SFTPClient",
+ "SFTPError",
+ "SFTPFile",
+ "SFTPHandle",
+ "SFTPServer",
+ "SFTPServerInterface",
+ "SSHClient",
+ "SSHConfig",
+ "SSHConfigDict",
+ "SSHException",
+ "SecurityOptions",
+ "ServerInterface",
+ "SubsystemHandler",
+ "Transport",
+ "WarningPolicy",
+ "io_sleep",
+ "util",
+]
diff --git a/paramiko/_version.py b/paramiko/_version.py
new file mode 100644
index 0000000..640ef72
--- /dev/null
+++ b/paramiko/_version.py
@@ -0,0 +1,2 @@
+__version_info__ = (3, 4, 0)
+__version__ = ".".join(map(str, __version_info__))
diff --git a/paramiko/_winapi.py b/paramiko/_winapi.py
new file mode 100644
index 0000000..4295457
--- /dev/null
+++ b/paramiko/_winapi.py
@@ -0,0 +1,413 @@
+"""
+Windows API functions implemented as ctypes functions and classes as found
+in jaraco.windows (3.4.1).
+
+If you encounter issues with this module, please consider reporting the issues
+in jaraco.windows and asking the author to port the fixes back here.
+"""
+
+import builtins
+import ctypes.wintypes
+
+from paramiko.util import u
+
+
+######################
+# jaraco.windows.error
+
+
+def format_system_message(errno):
+ """
+ Call FormatMessage with a system error number to retrieve
+ the descriptive error message.
+ """
+ # first some flags used by FormatMessageW
+ ALLOCATE_BUFFER = 0x100
+ FROM_SYSTEM = 0x1000
+
+ # Let FormatMessageW allocate the buffer (we'll free it below)
+ # Also, let it know we want a system error message.
+ flags = ALLOCATE_BUFFER | FROM_SYSTEM
+ source = None
+ message_id = errno
+ language_id = 0
+ result_buffer = ctypes.wintypes.LPWSTR()
+ buffer_size = 0
+ arguments = None
+ bytes = ctypes.windll.kernel32.FormatMessageW(
+ flags,
+ source,
+ message_id,
+ language_id,
+ ctypes.byref(result_buffer),
+ buffer_size,
+ arguments,
+ )
+ # note the following will cause an infinite loop if GetLastError
+ # repeatedly returns an error that cannot be formatted, although
+ # this should not happen.
+ handle_nonzero_success(bytes)
+ message = result_buffer.value
+ ctypes.windll.kernel32.LocalFree(result_buffer)
+ return message
+
+
+class WindowsError(builtins.WindowsError):
+ """more info about errors at
+ http://msdn.microsoft.com/en-us/library/ms681381(VS.85).aspx"""
+
+ def __init__(self, value=None):
+ if value is None:
+ value = ctypes.windll.kernel32.GetLastError()
+ strerror = format_system_message(value)
+ args = 0, strerror, None, value
+ super().__init__(*args)
+
+ @property
+ def message(self):
+ return self.strerror
+
+ @property
+ def code(self):
+ return self.winerror
+
+ def __str__(self):
+ return self.message
+
+ def __repr__(self):
+ return "{self.__class__.__name__}({self.winerror})".format(**vars())
+
+
+def handle_nonzero_success(result):
+ if result == 0:
+ raise WindowsError()
+
+
+###########################
+# jaraco.windows.api.memory
+
+GMEM_MOVEABLE = 0x2
+
+GlobalAlloc = ctypes.windll.kernel32.GlobalAlloc
+GlobalAlloc.argtypes = ctypes.wintypes.UINT, ctypes.c_size_t
+GlobalAlloc.restype = ctypes.wintypes.HANDLE
+
+GlobalLock = ctypes.windll.kernel32.GlobalLock
+GlobalLock.argtypes = (ctypes.wintypes.HGLOBAL,)
+GlobalLock.restype = ctypes.wintypes.LPVOID
+
+GlobalUnlock = ctypes.windll.kernel32.GlobalUnlock
+GlobalUnlock.argtypes = (ctypes.wintypes.HGLOBAL,)
+GlobalUnlock.restype = ctypes.wintypes.BOOL
+
+GlobalSize = ctypes.windll.kernel32.GlobalSize
+GlobalSize.argtypes = (ctypes.wintypes.HGLOBAL,)
+GlobalSize.restype = ctypes.c_size_t
+
+CreateFileMapping = ctypes.windll.kernel32.CreateFileMappingW
+CreateFileMapping.argtypes = [
+ ctypes.wintypes.HANDLE,
+ ctypes.c_void_p,
+ ctypes.wintypes.DWORD,
+ ctypes.wintypes.DWORD,
+ ctypes.wintypes.DWORD,
+ ctypes.wintypes.LPWSTR,
+]
+CreateFileMapping.restype = ctypes.wintypes.HANDLE
+
+MapViewOfFile = ctypes.windll.kernel32.MapViewOfFile
+MapViewOfFile.restype = ctypes.wintypes.HANDLE
+
+UnmapViewOfFile = ctypes.windll.kernel32.UnmapViewOfFile
+UnmapViewOfFile.argtypes = (ctypes.wintypes.HANDLE,)
+
+RtlMoveMemory = ctypes.windll.kernel32.RtlMoveMemory
+RtlMoveMemory.argtypes = (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)
+
+ctypes.windll.kernel32.LocalFree.argtypes = (ctypes.wintypes.HLOCAL,)
+
+#####################
+# jaraco.windows.mmap
+
+
+class MemoryMap:
+ """
+ A memory map object which can have security attributes overridden.
+ """
+
+ def __init__(self, name, length, security_attributes=None):
+ self.name = name
+ self.length = length
+ self.security_attributes = security_attributes
+ self.pos = 0
+
+ def __enter__(self):
+ p_SA = (
+ ctypes.byref(self.security_attributes)
+ if self.security_attributes
+ else None
+ )
+ INVALID_HANDLE_VALUE = -1
+ PAGE_READWRITE = 0x4
+ FILE_MAP_WRITE = 0x2
+ filemap = ctypes.windll.kernel32.CreateFileMappingW(
+ INVALID_HANDLE_VALUE,
+ p_SA,
+ PAGE_READWRITE,
+ 0,
+ self.length,
+ u(self.name),
+ )
+ handle_nonzero_success(filemap)
+ if filemap == INVALID_HANDLE_VALUE:
+ raise Exception("Failed to create file mapping")
+ self.filemap = filemap
+ self.view = MapViewOfFile(filemap, FILE_MAP_WRITE, 0, 0, 0)
+ return self
+
+ def seek(self, pos):
+ self.pos = pos
+
+ def write(self, msg):
+ assert isinstance(msg, bytes)
+ n = len(msg)
+ if self.pos + n >= self.length: # A little safety.
+ raise ValueError(f"Refusing to write {n} bytes")
+ dest = self.view + self.pos
+ length = ctypes.c_size_t(n)
+ ctypes.windll.kernel32.RtlMoveMemory(dest, msg, length)
+ self.pos += n
+
+ def read(self, n):
+ """
+ Read n bytes from mapped view.
+ """
+ out = ctypes.create_string_buffer(n)
+ source = self.view + self.pos
+ length = ctypes.c_size_t(n)
+ ctypes.windll.kernel32.RtlMoveMemory(out, source, length)
+ self.pos += n
+ return out.raw
+
+ def __exit__(self, exc_type, exc_val, tb):
+ ctypes.windll.kernel32.UnmapViewOfFile(self.view)
+ ctypes.windll.kernel32.CloseHandle(self.filemap)
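+
+# Rough usage sketch for MemoryMap (the map name below is illustrative only):
+#   with MemoryMap("Local\\demo-map", 4096,
+#                  get_security_attributes_for_user()) as mm:
+#       mm.write(b"hello")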
+
+
+#############################
+# jaraco.windows.api.security
+
+# from WinNT.h
+READ_CONTROL = 0x00020000
+STANDARD_RIGHTS_REQUIRED = 0x000F0000
+STANDARD_RIGHTS_READ = READ_CONTROL
+STANDARD_RIGHTS_WRITE = READ_CONTROL
+STANDARD_RIGHTS_EXECUTE = READ_CONTROL
+STANDARD_RIGHTS_ALL = 0x001F0000
+
+# from NTSecAPI.h
+POLICY_VIEW_LOCAL_INFORMATION = 0x00000001
+POLICY_VIEW_AUDIT_INFORMATION = 0x00000002
+POLICY_GET_PRIVATE_INFORMATION = 0x00000004
+POLICY_TRUST_ADMIN = 0x00000008
+POLICY_CREATE_ACCOUNT = 0x00000010
+POLICY_CREATE_SECRET = 0x00000020
+POLICY_CREATE_PRIVILEGE = 0x00000040
+POLICY_SET_DEFAULT_QUOTA_LIMITS = 0x00000080
+POLICY_SET_AUDIT_REQUIREMENTS = 0x00000100
+POLICY_AUDIT_LOG_ADMIN = 0x00000200
+POLICY_SERVER_ADMIN = 0x00000400
+POLICY_LOOKUP_NAMES = 0x00000800
+POLICY_NOTIFICATION = 0x00001000
+
+POLICY_ALL_ACCESS = (
+ STANDARD_RIGHTS_REQUIRED
+ | POLICY_VIEW_LOCAL_INFORMATION
+ | POLICY_VIEW_AUDIT_INFORMATION
+ | POLICY_GET_PRIVATE_INFORMATION
+ | POLICY_TRUST_ADMIN
+ | POLICY_CREATE_ACCOUNT
+ | POLICY_CREATE_SECRET
+ | POLICY_CREATE_PRIVILEGE
+ | POLICY_SET_DEFAULT_QUOTA_LIMITS
+ | POLICY_SET_AUDIT_REQUIREMENTS
+ | POLICY_AUDIT_LOG_ADMIN
+ | POLICY_SERVER_ADMIN
+ | POLICY_LOOKUP_NAMES
+)
+
+
+POLICY_READ = (
+ STANDARD_RIGHTS_READ
+ | POLICY_VIEW_AUDIT_INFORMATION
+ | POLICY_GET_PRIVATE_INFORMATION
+)
+
+POLICY_WRITE = (
+ STANDARD_RIGHTS_WRITE
+ | POLICY_TRUST_ADMIN
+ | POLICY_CREATE_ACCOUNT
+ | POLICY_CREATE_SECRET
+ | POLICY_CREATE_PRIVILEGE
+ | POLICY_SET_DEFAULT_QUOTA_LIMITS
+ | POLICY_SET_AUDIT_REQUIREMENTS
+ | POLICY_AUDIT_LOG_ADMIN
+ | POLICY_SERVER_ADMIN
+)
+
+POLICY_EXECUTE = (
+ STANDARD_RIGHTS_EXECUTE
+ | POLICY_VIEW_LOCAL_INFORMATION
+ | POLICY_LOOKUP_NAMES
+)
+
+
+class TokenAccess:
+ TOKEN_QUERY = 0x8
+
+
+class TokenInformationClass:
+ TokenUser = 1
+
+
+class TOKEN_USER(ctypes.Structure):
+ num = 1
+ _fields_ = [
+ ("SID", ctypes.c_void_p),
+ ("ATTRIBUTES", ctypes.wintypes.DWORD),
+ ]
+
+
+class SECURITY_DESCRIPTOR(ctypes.Structure):
+ """
+ typedef struct _SECURITY_DESCRIPTOR
+ {
+ UCHAR Revision;
+ UCHAR Sbz1;
+ SECURITY_DESCRIPTOR_CONTROL Control;
+ PSID Owner;
+ PSID Group;
+ PACL Sacl;
+ PACL Dacl;
+ } SECURITY_DESCRIPTOR;
+ """
+
+ SECURITY_DESCRIPTOR_CONTROL = ctypes.wintypes.USHORT
+ REVISION = 1
+
+ _fields_ = [
+ ("Revision", ctypes.c_ubyte),
+ ("Sbz1", ctypes.c_ubyte),
+ ("Control", SECURITY_DESCRIPTOR_CONTROL),
+ ("Owner", ctypes.c_void_p),
+ ("Group", ctypes.c_void_p),
+ ("Sacl", ctypes.c_void_p),
+ ("Dacl", ctypes.c_void_p),
+ ]
+
+
+class SECURITY_ATTRIBUTES(ctypes.Structure):
+ """
+ typedef struct _SECURITY_ATTRIBUTES {
+ DWORD nLength;
+ LPVOID lpSecurityDescriptor;
+ BOOL bInheritHandle;
+ } SECURITY_ATTRIBUTES;
+ """
+
+ _fields_ = [
+ ("nLength", ctypes.wintypes.DWORD),
+ ("lpSecurityDescriptor", ctypes.c_void_p),
+ ("bInheritHandle", ctypes.wintypes.BOOL),
+ ]
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.nLength = ctypes.sizeof(SECURITY_ATTRIBUTES)
+
+ @property
+ def descriptor(self):
+ return self._descriptor
+
+ @descriptor.setter
+ def descriptor(self, value):
+ self._descriptor = value
+ self.lpSecurityDescriptor = ctypes.addressof(value)
+
+
+ctypes.windll.advapi32.SetSecurityDescriptorOwner.argtypes = (
+ ctypes.POINTER(SECURITY_DESCRIPTOR),
+ ctypes.c_void_p,
+ ctypes.wintypes.BOOL,
+)
+
+#########################
+# jaraco.windows.security
+
+
+def GetTokenInformation(token, information_class):
+ """
+ Given a token, get the token information for it.
+ """
+ data_size = ctypes.wintypes.DWORD()
+ ctypes.windll.advapi32.GetTokenInformation(
+ token, information_class.num, 0, 0, ctypes.byref(data_size)
+ )
+ data = ctypes.create_string_buffer(data_size.value)
+ handle_nonzero_success(
+ ctypes.windll.advapi32.GetTokenInformation(
+ token,
+ information_class.num,
+ ctypes.byref(data),
+ ctypes.sizeof(data),
+ ctypes.byref(data_size),
+ )
+ )
+ return ctypes.cast(data, ctypes.POINTER(TOKEN_USER)).contents
+
+
+def OpenProcessToken(proc_handle, access):
+ result = ctypes.wintypes.HANDLE()
+ proc_handle = ctypes.wintypes.HANDLE(proc_handle)
+ handle_nonzero_success(
+ ctypes.windll.advapi32.OpenProcessToken(
+ proc_handle, access, ctypes.byref(result)
+ )
+ )
+ return result
+
+
+def get_current_user():
+ """
+ Return a TOKEN_USER for the owner of this process.
+ """
+ process = OpenProcessToken(
+ ctypes.windll.kernel32.GetCurrentProcess(), TokenAccess.TOKEN_QUERY
+ )
+ return GetTokenInformation(process, TOKEN_USER)
+
+
+def get_security_attributes_for_user(user=None):
+ """
+ Return a SECURITY_ATTRIBUTES structure with the SID set to the
+ specified user (uses current user if none is specified).
+ """
+ if user is None:
+ user = get_current_user()
+
+ assert isinstance(user, TOKEN_USER), "user must be TOKEN_USER instance"
+
+ SD = SECURITY_DESCRIPTOR()
+ SA = SECURITY_ATTRIBUTES()
+ # by attaching the actual security descriptor, it will be garbage-
+ # collected with the security attributes
+ SA.descriptor = SD
+ SA.bInheritHandle = 1
+
+ ctypes.windll.advapi32.InitializeSecurityDescriptor(
+ ctypes.byref(SD), SECURITY_DESCRIPTOR.REVISION
+ )
+ ctypes.windll.advapi32.SetSecurityDescriptorOwner(
+ ctypes.byref(SD), user.SID, 0
+ )
+ return SA
diff --git a/paramiko/agent.py b/paramiko/agent.py
new file mode 100644
index 0000000..b29a0d1
--- /dev/null
+++ b/paramiko/agent.py
@@ -0,0 +1,497 @@
+# Copyright (C) 2003-2007 John Rochester <john@jrochester.org>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+SSH Agent interface
+"""
+
+import os
+import socket
+import struct
+import sys
+import threading
+import time
+import tempfile
+import stat
+from logging import DEBUG
+from select import select
+from paramiko.common import io_sleep, byte_chr
+
+from paramiko.ssh_exception import SSHException, AuthenticationException
+from paramiko.message import Message
+from paramiko.pkey import PKey, UnknownKeyType
+from paramiko.util import asbytes, get_logger
+
+cSSH2_AGENTC_REQUEST_IDENTITIES = byte_chr(11)
+SSH2_AGENT_IDENTITIES_ANSWER = 12
+cSSH2_AGENTC_SIGN_REQUEST = byte_chr(13)
+SSH2_AGENT_SIGN_RESPONSE = 14
+
+SSH_AGENT_RSA_SHA2_256 = 2
+SSH_AGENT_RSA_SHA2_512 = 4
+# NOTE: RFC mildly confusing; while these flags are OR'd together, OpenSSH at
+# least really treats them like "AND"s, in the sense that if it finds the
+# SHA256 flag set it won't continue looking at the SHA512 one; it
+# short-circuits right away.
+# Thus, we never want to eg submit 6 to say "either's good".
+ALGORITHM_FLAG_MAP = {
+ "rsa-sha2-256": SSH_AGENT_RSA_SHA2_256,
+ "rsa-sha2-512": SSH_AGENT_RSA_SHA2_512,
+}
+for key, value in list(ALGORITHM_FLAG_MAP.items()):
+ ALGORITHM_FLAG_MAP[f"{key}-cert-v01@openssh.com"] = value
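+# The loop above mirrors each flag under the corresponding certificate
+# algorithm name, e.g. "rsa-sha2-256-cert-v01@openssh.com" also maps to
+# SSH_AGENT_RSA_SHA2_256.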
+
+
+# TODO 4.0: rename all these - including making some of their methods public?
+class AgentSSH:
+ def __init__(self):
+ self._conn = None
+ self._keys = ()
+
+ def get_keys(self):
+ """
+ Return the list of keys available through the SSH agent, if any. If
+ no SSH agent was running (or it couldn't be contacted), an empty list
+ will be returned.
+
+ This method performs no IO, just returns the list of keys retrieved
+ when the connection was made.
+
+ :return:
+ a tuple of `.AgentKey` objects representing keys available on the
+ SSH agent
+ """
+ return self._keys
+
+ def _connect(self, conn):
+ self._conn = conn
+ ptype, result = self._send_message(cSSH2_AGENTC_REQUEST_IDENTITIES)
+ if ptype != SSH2_AGENT_IDENTITIES_ANSWER:
+ raise SSHException("could not get keys from ssh-agent")
+ keys = []
+ for i in range(result.get_int()):
+ keys.append(
+ AgentKey(
+ agent=self,
+ blob=result.get_binary(),
+ comment=result.get_text(),
+ )
+ )
+ self._keys = tuple(keys)
+
+ def _close(self):
+ if self._conn is not None:
+ self._conn.close()
+ self._conn = None
+ self._keys = ()
+
+ def _send_message(self, msg):
+ msg = asbytes(msg)
+ self._conn.send(struct.pack(">I", len(msg)) + msg)
+ data = self._read_all(4)
+ msg = Message(self._read_all(struct.unpack(">I", data)[0]))
+ return ord(msg.get_byte()), msg
+
+ def _read_all(self, wanted):
+ result = self._conn.recv(wanted)
+ while len(result) < wanted:
+ if len(result) == 0:
+ raise SSHException("lost ssh-agent")
+ extra = self._conn.recv(wanted - len(result))
+ if len(extra) == 0:
+ raise SSHException("lost ssh-agent")
+ result += extra
+ return result
+
+
+class AgentProxyThread(threading.Thread):
+ """
+ Class in charge of communication between two channels.
+ """
+
+ def __init__(self, agent):
+ threading.Thread.__init__(self, target=self.run)
+ self._agent = agent
+ self._exit = False
+
+ def run(self):
+ try:
+ (r, addr) = self.get_connection()
+            # r is expected to be a socket-like object, or None
+ self.__inr = r
+            # addr is expected to be the peer address as a string, or None
+ self.__addr = addr
+ self._agent.connect()
+ if not isinstance(self._agent, int) and (
+ self._agent._conn is None
+ or not hasattr(self._agent._conn, "fileno")
+ ):
+ raise AuthenticationException("Unable to connect to SSH agent")
+ self._communicate()
+ except:
+ # XXX Not sure what to do here ... raise or pass ?
+ raise
+
+ def _communicate(self):
+ import fcntl
+
+ oldflags = fcntl.fcntl(self.__inr, fcntl.F_GETFL)
+ fcntl.fcntl(self.__inr, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
+ while not self._exit:
+ events = select([self._agent._conn, self.__inr], [], [], 0.5)
+ for fd in events[0]:
+ if self._agent._conn == fd:
+ data = self._agent._conn.recv(512)
+ if len(data) != 0:
+ self.__inr.send(data)
+ else:
+ self._close()
+ break
+ elif self.__inr == fd:
+ data = self.__inr.recv(512)
+ if len(data) != 0:
+ self._agent._conn.send(data)
+ else:
+ self._close()
+ break
+ time.sleep(io_sleep)
+
+ def _close(self):
+ self._exit = True
+ self.__inr.close()
+ self._agent._conn.close()
+
+
+class AgentLocalProxy(AgentProxyThread):
+ """
+    Proxy thread that accepts connections on a local unix socket (the "fake"
+    agent) and relays them to the forwarded SSH agent.
+ """
+
+ def __init__(self, agent):
+ AgentProxyThread.__init__(self, agent)
+
+ def get_connection(self):
+ """
+ Return a pair of socket object and string address.
+
+ May block!
+ """
+ conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ try:
+ conn.bind(self._agent._get_filename())
+ conn.listen(1)
+ (r, addr) = conn.accept()
+ return r, addr
+ except:
+ raise
+
+
+class AgentRemoteProxy(AgentProxyThread):
+ """
+    Proxy thread used when querying a remote SSH agent over a channel.
+ """
+
+ def __init__(self, agent, chan):
+ AgentProxyThread.__init__(self, agent)
+ self.__chan = chan
+
+ def get_connection(self):
+ return self.__chan, None
+
+
+def get_agent_connection():
+ """
+    Return a connection to the platform's SSH agent, or ``None`` if no
+    agent was found or is supported.
+
+ .. versionadded:: 2.10
+ """
+ if ("SSH_AUTH_SOCK" in os.environ) and (sys.platform != "win32"):
+ conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ try:
+ conn.connect(os.environ["SSH_AUTH_SOCK"])
+ return conn
+ except:
+ # probably a dangling env var: the ssh agent is gone
+ return
+ elif sys.platform == "win32":
+ from . import win_pageant, win_openssh
+
+ conn = None
+ if win_pageant.can_talk_to_agent():
+ conn = win_pageant.PageantConnection()
+ elif win_openssh.can_talk_to_agent():
+ conn = win_openssh.OpenSSHAgentConnection()
+ return conn
+ else:
+ # no agent support
+ return
+
+
+class AgentClientProxy:
+ """
+    Class proxying requests as a client:
+
+    #. The client asks for agent forwarding via ``request_forward_agent()``.
+    #. The server creates a proxy and a fake SSH agent.
+    #. The server asks to establish a connection when needed, calling the
+       ``forward_agent_handler`` on the client side.
+    #. The ``forward_agent_handler`` launches a thread connecting the remote
+       fake agent and the local agent.
+    #. Communication occurs ...
+ """
+
+ def __init__(self, chanRemote):
+ self._conn = None
+ self.__chanR = chanRemote
+ self.thread = AgentRemoteProxy(self, chanRemote)
+ self.thread.start()
+
+ def __del__(self):
+ self.close()
+
+ def connect(self):
+ """
+ Method automatically called by ``AgentProxyThread.run``.
+ """
+ conn = get_agent_connection()
+ if not conn:
+ return
+ self._conn = conn
+
+ def close(self):
+ """
+        Close the current connection and terminate the agent.
+        Should be called manually.
+ """
+ if hasattr(self, "thread"):
+ self.thread._exit = True
+ self.thread.join(1000)
+ if self._conn is not None:
+ self._conn.close()
+
+
+class AgentServerProxy(AgentSSH):
+ """
+ Allows an SSH server to access a forwarded agent.
+
+ This also creates a unix domain socket on the system to allow external
+ programs to also access the agent. For this reason, you probably only want
+ to create one of these.
+
+ :meth:`connect` must be called before it is usable. This will also load the
+ list of keys the agent contains. You must also call :meth:`close` in
+ order to clean up the unix socket and the thread that maintains it.
+ (:class:`contextlib.closing` might be helpful to you.)
+
+ :param .Transport t: Transport used for SSH Agent communication forwarding
+
+ :raises: `.SSHException` -- mostly if we lost the agent
+ """
+
+ def __init__(self, t):
+ AgentSSH.__init__(self)
+ self.__t = t
+ self._dir = tempfile.mkdtemp("sshproxy")
+ os.chmod(self._dir, stat.S_IRWXU)
+ self._file = self._dir + "/sshproxy.ssh"
+ self.thread = AgentLocalProxy(self)
+ self.thread.start()
+
+ def __del__(self):
+ self.close()
+
+ def connect(self):
+ conn_sock = self.__t.open_forward_agent_channel()
+ if conn_sock is None:
+ raise SSHException("lost ssh-agent")
+ conn_sock.set_name("auth-agent")
+ self._connect(conn_sock)
+
+ def close(self):
+ """
+        Terminate the agent, clean up the files, and close connections.
+        Should be called manually.
+ """
+ os.remove(self._file)
+ os.rmdir(self._dir)
+ self.thread._exit = True
+ self.thread.join(1000)
+ self._close()
+
+ def get_env(self):
+ """
+        Helper for setting up the environment under unix.
+
+        :return:
+            a dict containing the ``SSH_AUTH_SOCK`` environment variable
+ """
+ return {"SSH_AUTH_SOCK": self._get_filename()}
+
+ def _get_filename(self):
+ return self._file
+
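+# Illustrative usage sketch (not part of the original patch): a typical
+# server-side flow for AgentServerProxy, assuming ``t`` is an authenticated
+# Transport whose client requested agent forwarding.
+#
+#   import contextlib
+#
+#   with contextlib.closing(AgentServerProxy(t)) as proxy:
+#       proxy.connect()
+#       env = proxy.get_env()    # {"SSH_AUTH_SOCK": <unix socket path>}
+#       keys = proxy.get_keys()  # tuple of AgentKey objects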
+
+class AgentRequestHandler:
+ """
+ Primary/default implementation of SSH agent forwarding functionality.
+
+ Simply instantiate this class, handing it a live command-executing session
+ object, and it will handle forwarding any local SSH agent processes it
+ finds.
+
+ For example::
+
+ # Connect
+ client = SSHClient()
+ client.connect(host, port, username)
+ # Obtain session
+ session = client.get_transport().open_session()
+ # Forward local agent
+ AgentRequestHandler(session)
+ # Commands executed after this point will see the forwarded agent on
+ # the remote end.
+ session.exec_command("git clone https://my.git.repository/")
+ """
+
+ def __init__(self, chanClient):
+ self._conn = None
+ self.__chanC = chanClient
+ chanClient.request_forward_agent(self._forward_agent_handler)
+ self.__clientProxys = []
+
+ def _forward_agent_handler(self, chanRemote):
+ self.__clientProxys.append(AgentClientProxy(chanRemote))
+
+ def __del__(self):
+ self.close()
+
+ def close(self):
+ for p in self.__clientProxys:
+ p.close()
+
+
+class Agent(AgentSSH):
+ """
+ Client interface for using private keys from an SSH agent running on the
+ local machine. If an SSH agent is running, this class can be used to
+ connect to it and retrieve `.PKey` objects which can be used when
+ attempting to authenticate to remote SSH servers.
+
+ Upon initialization, a session with the local machine's SSH agent is
+ opened, if one is running. If no agent is running, initialization will
+ succeed, but `get_keys` will return an empty tuple.
+
+ :raises: `.SSHException` --
+ if an SSH agent is found, but speaks an incompatible protocol
+
+ .. versionchanged:: 2.10
+ Added support for native openssh agent on windows (extending previous
+ putty pageant support)
+ """
+
+ def __init__(self):
+ AgentSSH.__init__(self)
+
+ conn = get_agent_connection()
+ if not conn:
+ return
+ self._connect(conn)
+
+ def close(self):
+ """
+ Close the SSH agent connection.
+ """
+ self._close()
+
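+# Illustrative usage sketch (not part of the original patch): enumerating keys
+# from a locally running agent. Assumes an agent is reachable (SSH_AUTH_SOCK
+# on unix, Pageant/OpenSSH agent on Windows); otherwise get_keys() is empty.
+#
+#   agent = Agent()
+#   try:
+#       for key in agent.get_keys():
+#           print(key.get_name(), key.get_bits(), key.comment)
+#   finally:
+#       agent.close()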
+
+class AgentKey(PKey):
+ """
+ Private key held in a local SSH agent. This type of key can be used for
+ authenticating to a remote server (signing). Most other key operations
+ work as expected.
+
+ .. versionchanged:: 3.2
+ Added the ``comment`` kwarg and attribute.
+
+ .. versionchanged:: 3.2
+ Added the ``.inner_key`` attribute holding a reference to the 'real'
+ key instance this key is a proxy for, if one was obtainable, else None.
+ """
+
+ def __init__(self, agent, blob, comment=""):
+ self.agent = agent
+ self.blob = blob
+ self.comment = comment
+ msg = Message(blob)
+ self.name = msg.get_text()
+ self._logger = get_logger(__file__)
+ self.inner_key = None
+ try:
+ self.inner_key = PKey.from_type_string(
+ key_type=self.name, key_bytes=blob
+ )
+ except UnknownKeyType:
+ # Log, but don't explode, since inner_key is a best-effort thing.
+ err = "Unable to derive inner_key for agent key of type {!r}"
+ self.log(DEBUG, err.format(self.name))
+
+ def log(self, *args, **kwargs):
+ return self._logger.log(*args, **kwargs)
+
+ def asbytes(self):
+ # Prefer inner_key.asbytes, since that will differ for eg RSA-CERT
+ return self.inner_key.asbytes() if self.inner_key else self.blob
+
+ def get_name(self):
+ return self.name
+
+ def get_bits(self):
+ # Have to work around PKey's default get_bits being crap
+ if self.inner_key is not None:
+ return self.inner_key.get_bits()
+ return super().get_bits()
+
+ def __getattr__(self, name):
+ """
+ Proxy any un-implemented methods/properties to the inner_key.
+ """
+ if self.inner_key is None: # nothing to proxy to
+ raise AttributeError(name)
+ return getattr(self.inner_key, name)
+
+ @property
+ def _fields(self):
+ fallback = [self.get_name(), self.blob]
+ return self.inner_key._fields if self.inner_key else fallback
+
+ def sign_ssh_data(self, data, algorithm=None):
+ msg = Message()
+ msg.add_byte(cSSH2_AGENTC_SIGN_REQUEST)
+ # NOTE: this used to be just self.blob, which is not entirely right for
+ # RSA-CERT 'keys' - those end up always degrading to ssh-rsa type
+ # signatures, for reasons probably internal to OpenSSH's agent code,
+ # even if everything else wants SHA2 (including our flag map).
+ msg.add_string(self.asbytes())
+ msg.add_string(data)
+ msg.add_int(ALGORITHM_FLAG_MAP.get(algorithm, 0))
+ ptype, result = self.agent._send_message(msg)
+ if ptype != SSH2_AGENT_SIGN_RESPONSE:
+ raise SSHException("key cannot be used for signing")
+ return result.get_binary()
diff --git a/paramiko/auth_handler.py b/paramiko/auth_handler.py
new file mode 100644
index 0000000..bc7f298
--- /dev/null
+++ b/paramiko/auth_handler.py
@@ -0,0 +1,1092 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+`.AuthHandler`
+"""
+
+import weakref
+import threading
+import time
+import re
+
+from paramiko.common import (
+ cMSG_SERVICE_REQUEST,
+ cMSG_DISCONNECT,
+ DISCONNECT_SERVICE_NOT_AVAILABLE,
+ DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
+ cMSG_USERAUTH_REQUEST,
+ cMSG_SERVICE_ACCEPT,
+ DEBUG,
+ AUTH_SUCCESSFUL,
+ INFO,
+ cMSG_USERAUTH_SUCCESS,
+ cMSG_USERAUTH_FAILURE,
+ AUTH_PARTIALLY_SUCCESSFUL,
+ cMSG_USERAUTH_INFO_REQUEST,
+ WARNING,
+ AUTH_FAILED,
+ cMSG_USERAUTH_PK_OK,
+ cMSG_USERAUTH_INFO_RESPONSE,
+ MSG_SERVICE_REQUEST,
+ MSG_SERVICE_ACCEPT,
+ MSG_USERAUTH_REQUEST,
+ MSG_USERAUTH_SUCCESS,
+ MSG_USERAUTH_FAILURE,
+ MSG_USERAUTH_BANNER,
+ MSG_USERAUTH_INFO_REQUEST,
+ MSG_USERAUTH_INFO_RESPONSE,
+ cMSG_USERAUTH_GSSAPI_RESPONSE,
+ cMSG_USERAUTH_GSSAPI_TOKEN,
+ cMSG_USERAUTH_GSSAPI_MIC,
+ MSG_USERAUTH_GSSAPI_RESPONSE,
+ MSG_USERAUTH_GSSAPI_TOKEN,
+ MSG_USERAUTH_GSSAPI_ERROR,
+ MSG_USERAUTH_GSSAPI_ERRTOK,
+ MSG_USERAUTH_GSSAPI_MIC,
+ MSG_NAMES,
+ cMSG_USERAUTH_BANNER,
+)
+from paramiko.message import Message
+from paramiko.util import b, u
+from paramiko.ssh_exception import (
+ SSHException,
+ AuthenticationException,
+ BadAuthenticationType,
+ PartialAuthentication,
+)
+from paramiko.server import InteractiveQuery
+from paramiko.ssh_gss import GSSAuth, GSS_EXCEPTIONS
+
+
+class AuthHandler:
+ """
+ Internal class to handle the mechanics of authentication.
+ """
+
+ def __init__(self, transport):
+ self.transport = weakref.proxy(transport)
+ self.username = None
+ self.authenticated = False
+ self.auth_event = None
+ self.auth_method = ""
+ self.banner = None
+ self.password = None
+ self.private_key = None
+ self.interactive_handler = None
+ self.submethods = None
+ # for server mode:
+ self.auth_username = None
+ self.auth_fail_count = 0
+ # for GSSAPI
+ self.gss_host = None
+ self.gss_deleg_creds = True
+
+ def _log(self, *args):
+ return self.transport._log(*args)
+
+ def is_authenticated(self):
+ return self.authenticated
+
+ def get_username(self):
+ if self.transport.server_mode:
+ return self.auth_username
+ else:
+ return self.username
+
+ def auth_none(self, username, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = "none"
+ self.username = username
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_publickey(self, username, key, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = "publickey"
+ self.username = username
+ self.private_key = key
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_password(self, username, password, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = "password"
+ self.username = username
+ self.password = password
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_interactive(self, username, handler, event, submethods=""):
+ """
+ response_list = handler(title, instructions, prompt_list)
+ """
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = "keyboard-interactive"
+ self.username = username
+ self.interactive_handler = handler
+ self.submethods = submethods
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_gssapi_with_mic(self, username, gss_host, gss_deleg_creds, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = "gssapi-with-mic"
+ self.username = username
+ self.gss_host = gss_host
+ self.gss_deleg_creds = gss_deleg_creds
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_gssapi_keyex(self, username, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = "gssapi-keyex"
+ self.username = username
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def abort(self):
+ if self.auth_event is not None:
+ self.auth_event.set()
+
+ # ...internals...
+
+ def _request_auth(self):
+ m = Message()
+ m.add_byte(cMSG_SERVICE_REQUEST)
+ m.add_string("ssh-userauth")
+ self.transport._send_message(m)
+
+ def _disconnect_service_not_available(self):
+ m = Message()
+ m.add_byte(cMSG_DISCONNECT)
+ m.add_int(DISCONNECT_SERVICE_NOT_AVAILABLE)
+ m.add_string("Service not available")
+ m.add_string("en")
+ self.transport._send_message(m)
+ self.transport.close()
+
+ def _disconnect_no_more_auth(self):
+ m = Message()
+ m.add_byte(cMSG_DISCONNECT)
+ m.add_int(DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE)
+ m.add_string("No more auth methods available")
+ m.add_string("en")
+ self.transport._send_message(m)
+ self.transport.close()
+
+ def _get_key_type_and_bits(self, key):
+ """
+ Given any key, return its type/algorithm & bits-to-sign.
+
+        Intended for input to, or verification of, key signatures.
+ """
+ # Use certificate contents, if available, plain pubkey otherwise
+ if key.public_blob:
+ return key.public_blob.key_type, key.public_blob.key_blob
+ else:
+ return key.get_name(), key
+
+ def _get_session_blob(self, key, service, username, algorithm):
+ m = Message()
+ m.add_string(self.transport.session_id)
+ m.add_byte(cMSG_USERAUTH_REQUEST)
+ m.add_string(username)
+ m.add_string(service)
+ m.add_string("publickey")
+ m.add_boolean(True)
+ _, bits = self._get_key_type_and_bits(key)
+ m.add_string(algorithm)
+ m.add_string(bits)
+ return m.asbytes()
+
+ def wait_for_response(self, event):
+ max_ts = None
+ if self.transport.auth_timeout is not None:
+ max_ts = time.time() + self.transport.auth_timeout
+ while True:
+ event.wait(0.1)
+ if not self.transport.is_active():
+ e = self.transport.get_exception()
+ if (e is None) or issubclass(e.__class__, EOFError):
+ e = AuthenticationException(
+ "Authentication failed: transport shut down or saw EOF"
+ )
+ raise e
+ if event.is_set():
+ break
+ if max_ts is not None and max_ts <= time.time():
+ raise AuthenticationException("Authentication timeout.")
+
+ if not self.is_authenticated():
+ e = self.transport.get_exception()
+ if e is None:
+ e = AuthenticationException("Authentication failed.")
+ # this is horrible. Python Exception isn't yet descended from
+ # object, so type(e) won't work. :(
+ # TODO 4.0: lol. just lmao.
+ if issubclass(e.__class__, PartialAuthentication):
+ return e.allowed_types
+ raise e
+ return []
+
+ def _parse_service_request(self, m):
+ service = m.get_text()
+ if self.transport.server_mode and (service == "ssh-userauth"):
+ # accepted
+ m = Message()
+ m.add_byte(cMSG_SERVICE_ACCEPT)
+ m.add_string(service)
+ self.transport._send_message(m)
+ banner, language = self.transport.server_object.get_banner()
+ if banner:
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_BANNER)
+ m.add_string(banner)
+ m.add_string(language)
+ self.transport._send_message(m)
+ return
+ # dunno this one
+ self._disconnect_service_not_available()
+
+ def _generate_key_from_request(self, algorithm, keyblob):
+ # For use in server mode.
+ options = self.transport.preferred_pubkeys
+ if algorithm.replace("-cert-v01@openssh.com", "") not in options:
+ err = (
+ "Auth rejected: pubkey algorithm '{}' unsupported or disabled"
+ )
+ self._log(INFO, err.format(algorithm))
+ return None
+ return self.transport._key_info[algorithm](Message(keyblob))
+
+ def _choose_fallback_pubkey_algorithm(self, key_type, my_algos):
+ # Fallback: first one in our (possibly tweaked by caller) list
+ pubkey_algo = my_algos[0]
+ msg = "Server did not send a server-sig-algs list; defaulting to our first preferred algo ({!r})" # noqa
+ self._log(DEBUG, msg.format(pubkey_algo))
+ self._log(
+ DEBUG,
+ "NOTE: you may use the 'disabled_algorithms' SSHClient/Transport init kwarg to disable that or other algorithms if your server does not support them!", # noqa
+ )
+ return pubkey_algo
+
+ def _finalize_pubkey_algorithm(self, key_type):
+ # Short-circuit for non-RSA keys
+ if "rsa" not in key_type:
+ return key_type
+ self._log(
+ DEBUG,
+ "Finalizing pubkey algorithm for key of type {!r}".format(
+ key_type
+ ),
+ )
+ # NOTE re #2017: When the key is an RSA cert and the remote server is
+ # OpenSSH 7.7 or earlier, always use ssh-rsa-cert-v01@openssh.com.
+ # Those versions of the server won't support rsa-sha2 family sig algos
+ # for certs specifically, and in tandem with various server bugs
+ # regarding server-sig-algs, it's impossible to fit this into the rest
+ # of the logic here.
+ if key_type.endswith("-cert-v01@openssh.com") and re.search(
+ r"-OpenSSH_(?:[1-6]|7\.[0-7])", self.transport.remote_version
+ ):
+ pubkey_algo = "ssh-rsa-cert-v01@openssh.com"
+ self.transport._agreed_pubkey_algorithm = pubkey_algo
+ self._log(DEBUG, "OpenSSH<7.8 + RSA cert = forcing ssh-rsa!")
+ self._log(
+ DEBUG, "Agreed upon {!r} pubkey algorithm".format(pubkey_algo)
+ )
+ return pubkey_algo
+ # Normal attempts to handshake follow from here.
+ # Only consider RSA algos from our list, lest we agree on another!
+ my_algos = [x for x in self.transport.preferred_pubkeys if "rsa" in x]
+ self._log(DEBUG, "Our pubkey algorithm list: {}".format(my_algos))
+ # Short-circuit negatively if user disabled all RSA algos (heh)
+ if not my_algos:
+ raise SSHException(
+ "An RSA key was specified, but no RSA pubkey algorithms are configured!" # noqa
+ )
+ # Check for server-sig-algs if supported & sent
+ server_algo_str = u(
+ self.transport.server_extensions.get("server-sig-algs", b(""))
+ )
+ pubkey_algo = None
+ # Prefer to match against server-sig-algs
+ if server_algo_str:
+ server_algos = server_algo_str.split(",")
+ self._log(
+ DEBUG, "Server-side algorithm list: {}".format(server_algos)
+ )
+ # Only use algos from our list that the server likes, in our own
+ # preference order. (NOTE: purposefully using same style as in
+ # Transport...expect to refactor later)
+ agreement = list(filter(server_algos.__contains__, my_algos))
+ if agreement:
+ pubkey_algo = agreement[0]
+ self._log(
+ DEBUG,
+ "Agreed upon {!r} pubkey algorithm".format(pubkey_algo),
+ )
+ else:
+ self._log(DEBUG, "No common pubkey algorithms exist! Dying.")
+ # TODO: MAY want to use IncompatiblePeer again here but that's
+ # technically for initial key exchange, not pubkey auth.
+ err = "Unable to agree on a pubkey algorithm for signing a {!r} key!" # noqa
+ raise AuthenticationException(err.format(key_type))
+ # Fallback to something based purely on the key & our configuration
+ else:
+ pubkey_algo = self._choose_fallback_pubkey_algorithm(
+ key_type, my_algos
+ )
+ if key_type.endswith("-cert-v01@openssh.com"):
+ pubkey_algo += "-cert-v01@openssh.com"
+ self.transport._agreed_pubkey_algorithm = pubkey_algo
+ return pubkey_algo
+
+ def _parse_service_accept(self, m):
+ service = m.get_text()
+ if service == "ssh-userauth":
+ self._log(DEBUG, "userauth is OK")
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_REQUEST)
+ m.add_string(self.username)
+ m.add_string("ssh-connection")
+ m.add_string(self.auth_method)
+ if self.auth_method == "password":
+ m.add_boolean(False)
+ password = b(self.password)
+ m.add_string(password)
+ elif self.auth_method == "publickey":
+ m.add_boolean(True)
+ key_type, bits = self._get_key_type_and_bits(self.private_key)
+ algorithm = self._finalize_pubkey_algorithm(key_type)
+ m.add_string(algorithm)
+ m.add_string(bits)
+ blob = self._get_session_blob(
+ self.private_key,
+ "ssh-connection",
+ self.username,
+ algorithm,
+ )
+ sig = self.private_key.sign_ssh_data(blob, algorithm)
+ m.add_string(sig)
+ elif self.auth_method == "keyboard-interactive":
+ m.add_string("")
+ m.add_string(self.submethods)
+ elif self.auth_method == "gssapi-with-mic":
+ sshgss = GSSAuth(self.auth_method, self.gss_deleg_creds)
+ m.add_bytes(sshgss.ssh_gss_oids())
+ # send the supported GSSAPI OIDs to the server
+ self.transport._send_message(m)
+ ptype, m = self.transport.packetizer.read_message()
+ if ptype == MSG_USERAUTH_BANNER:
+ self._parse_userauth_banner(m)
+ ptype, m = self.transport.packetizer.read_message()
+ if ptype == MSG_USERAUTH_GSSAPI_RESPONSE:
+ # Read the mechanism selected by the server. We send just
+ # the Kerberos V5 OID, so the server can only respond with
+ # this OID.
+ mech = m.get_string()
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_GSSAPI_TOKEN)
+ try:
+ m.add_string(
+ sshgss.ssh_init_sec_context(
+ self.gss_host, mech, self.username
+ )
+ )
+ except GSS_EXCEPTIONS as e:
+ return self._handle_local_gss_failure(e)
+ self.transport._send_message(m)
+ while True:
+ ptype, m = self.transport.packetizer.read_message()
+ if ptype == MSG_USERAUTH_GSSAPI_TOKEN:
+ srv_token = m.get_string()
+ try:
+ next_token = sshgss.ssh_init_sec_context(
+ self.gss_host,
+ mech,
+ self.username,
+ srv_token,
+ )
+ except GSS_EXCEPTIONS as e:
+ return self._handle_local_gss_failure(e)
+ # After this step the GSSAPI should not return any
+ # token. If it does, we keep sending the token to
+                        # the server until no more tokens are returned.
+ if next_token is None:
+ break
+ else:
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_GSSAPI_TOKEN)
+ m.add_string(next_token)
+ self.transport.send_message(m)
+ else:
+ raise SSHException(
+ "Received Package: {}".format(MSG_NAMES[ptype])
+ )
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_GSSAPI_MIC)
+ # send the MIC to the server
+ m.add_string(sshgss.ssh_get_mic(self.transport.session_id))
+ elif ptype == MSG_USERAUTH_GSSAPI_ERRTOK:
+ # RFC 4462 says we are not required to implement GSS-API
+ # error messages.
+ # See RFC 4462 Section 3.8 in
+ # http://www.ietf.org/rfc/rfc4462.txt
+ raise SSHException("Server returned an error token")
+ elif ptype == MSG_USERAUTH_GSSAPI_ERROR:
+ maj_status = m.get_int()
+ min_status = m.get_int()
+ err_msg = m.get_string()
+ m.get_string() # Lang tag - discarded
+ raise SSHException(
+ """GSS-API Error:
+Major Status: {}
+Minor Status: {}
+Error Message: {}
+""".format(
+ maj_status, min_status, err_msg
+ )
+ )
+ elif ptype == MSG_USERAUTH_FAILURE:
+ self._parse_userauth_failure(m)
+ return
+ else:
+ raise SSHException(
+ "Received Package: {}".format(MSG_NAMES[ptype])
+ )
+ elif (
+ self.auth_method == "gssapi-keyex"
+ and self.transport.gss_kex_used
+ ):
+ kexgss = self.transport.kexgss_ctxt
+ kexgss.set_username(self.username)
+ mic_token = kexgss.ssh_get_mic(self.transport.session_id)
+ m.add_string(mic_token)
+ elif self.auth_method == "none":
+ pass
+ else:
+ raise SSHException(
+ 'Unknown auth method "{}"'.format(self.auth_method)
+ )
+ self.transport._send_message(m)
+ else:
+ self._log(
+ DEBUG, 'Service request "{}" accepted (?)'.format(service)
+ )
+
+ def _send_auth_result(self, username, method, result):
+ # okay, send result
+ m = Message()
+ if result == AUTH_SUCCESSFUL:
+ self._log(INFO, "Auth granted ({}).".format(method))
+ m.add_byte(cMSG_USERAUTH_SUCCESS)
+ self.authenticated = True
+ else:
+ self._log(INFO, "Auth rejected ({}).".format(method))
+ m.add_byte(cMSG_USERAUTH_FAILURE)
+ m.add_string(
+ self.transport.server_object.get_allowed_auths(username)
+ )
+ if result == AUTH_PARTIALLY_SUCCESSFUL:
+ m.add_boolean(True)
+ else:
+ m.add_boolean(False)
+ self.auth_fail_count += 1
+ self.transport._send_message(m)
+ if self.auth_fail_count >= 10:
+ self._disconnect_no_more_auth()
+ if result == AUTH_SUCCESSFUL:
+ self.transport._auth_trigger()
+
+ def _interactive_query(self, q):
+ # make interactive query instead of response
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_INFO_REQUEST)
+ m.add_string(q.name)
+ m.add_string(q.instructions)
+ m.add_string(bytes())
+ m.add_int(len(q.prompts))
+ for p in q.prompts:
+ m.add_string(p[0])
+ m.add_boolean(p[1])
+ self.transport._send_message(m)
+
+ def _parse_userauth_request(self, m):
+ if not self.transport.server_mode:
+ # er, uh... what?
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_FAILURE)
+ m.add_string("none")
+ m.add_boolean(False)
+ self.transport._send_message(m)
+ return
+ if self.authenticated:
+ # ignore
+ return
+ username = m.get_text()
+ service = m.get_text()
+ method = m.get_text()
+ self._log(
+ DEBUG,
+ "Auth request (type={}) service={}, username={}".format(
+ method, service, username
+ ),
+ )
+ if service != "ssh-connection":
+ self._disconnect_service_not_available()
+ return
+ if (self.auth_username is not None) and (
+ self.auth_username != username
+ ):
+ self._log(
+ WARNING,
+ "Auth rejected because the client attempted to change username in mid-flight", # noqa
+ )
+ self._disconnect_no_more_auth()
+ return
+ self.auth_username = username
+ # check if GSS-API authentication is enabled
+ gss_auth = self.transport.server_object.enable_auth_gssapi()
+
+ if method == "none":
+ result = self.transport.server_object.check_auth_none(username)
+ elif method == "password":
+ changereq = m.get_boolean()
+ password = m.get_binary()
+ try:
+ password = password.decode("UTF-8")
+ except UnicodeError:
+ # some clients/servers expect non-utf-8 passwords!
+ # in this case, just return the raw byte string.
+ pass
+ if changereq:
+ # always treated as failure, since we don't support changing
+ # passwords, but collect the list of valid auth types from
+ # the callback anyway
+ self._log(DEBUG, "Auth request to change passwords (rejected)")
+ newpassword = m.get_binary()
+ try:
+ newpassword = newpassword.decode("UTF-8", "replace")
+ except UnicodeError:
+ pass
+ result = AUTH_FAILED
+ else:
+ result = self.transport.server_object.check_auth_password(
+ username, password
+ )
+ elif method == "publickey":
+ sig_attached = m.get_boolean()
+ # NOTE: server never wants to guess a client's algo, they're
+ # telling us directly. No need for _finalize_pubkey_algorithm
+ # anywhere in this flow.
+ algorithm = m.get_text()
+ keyblob = m.get_binary()
+ try:
+ key = self._generate_key_from_request(algorithm, keyblob)
+ except SSHException as e:
+ self._log(INFO, "Auth rejected: public key: {}".format(str(e)))
+ key = None
+ except Exception as e:
+ msg = "Auth rejected: unsupported or mangled public key ({}: {})" # noqa
+ self._log(INFO, msg.format(e.__class__.__name__, e))
+ key = None
+ if key is None:
+ self._disconnect_no_more_auth()
+ return
+ # first check if this key is okay... if not, we can skip the verify
+ result = self.transport.server_object.check_auth_publickey(
+ username, key
+ )
+ if result != AUTH_FAILED:
+ # key is okay, verify it
+ if not sig_attached:
+ # client wants to know if this key is acceptable, before it
+ # signs anything... send special "ok" message
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_PK_OK)
+ m.add_string(algorithm)
+ m.add_string(keyblob)
+ self.transport._send_message(m)
+ return
+ sig = Message(m.get_binary())
+ blob = self._get_session_blob(
+ key, service, username, algorithm
+ )
+ if not key.verify_ssh_sig(blob, sig):
+ self._log(INFO, "Auth rejected: invalid signature")
+ result = AUTH_FAILED
+ elif method == "keyboard-interactive":
+ submethods = m.get_string()
+ result = self.transport.server_object.check_auth_interactive(
+ username, submethods
+ )
+ if isinstance(result, InteractiveQuery):
+ # make interactive query instead of response
+ self._interactive_query(result)
+ return
+ elif method == "gssapi-with-mic" and gss_auth:
+ sshgss = GSSAuth(method)
+ # Read the number of OID mechanisms supported by the client.
+            # OpenSSH sends just one OID. It's the Kerberos V5 OID and that's
+ # the only OID we support.
+ mechs = m.get_int()
+ # We can't accept more than one OID, so if the SSH client sends
+ # more than one, disconnect.
+ if mechs > 1:
+ self._log(
+ INFO,
+ "Disconnect: Received more than one GSS-API OID mechanism",
+ )
+ self._disconnect_no_more_auth()
+ desired_mech = m.get_string()
+ mech_ok = sshgss.ssh_check_mech(desired_mech)
+ # if we don't support the mechanism, disconnect.
+ if not mech_ok:
+ self._log(
+ INFO,
+ "Disconnect: Received an invalid GSS-API OID mechanism",
+ )
+ self._disconnect_no_more_auth()
+ # send the Kerberos V5 GSSAPI OID to the client
+ supported_mech = sshgss.ssh_gss_oids("server")
+ # RFC 4462 says we are not required to implement GSS-API error
+ # messages. See section 3.8 in http://www.ietf.org/rfc/rfc4462.txt
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_GSSAPI_RESPONSE)
+ m.add_bytes(supported_mech)
+ self.transport.auth_handler = GssapiWithMicAuthHandler(
+ self, sshgss
+ )
+ self.transport._expected_packet = (
+ MSG_USERAUTH_GSSAPI_TOKEN,
+ MSG_USERAUTH_REQUEST,
+ MSG_SERVICE_REQUEST,
+ )
+ self.transport._send_message(m)
+ return
+ elif method == "gssapi-keyex" and gss_auth:
+ mic_token = m.get_string()
+ sshgss = self.transport.kexgss_ctxt
+ if sshgss is None:
+ # If there is no valid context, we reject the authentication
+ result = AUTH_FAILED
+ self._send_auth_result(username, method, result)
+ try:
+ sshgss.ssh_check_mic(
+ mic_token, self.transport.session_id, self.auth_username
+ )
+ except Exception:
+ result = AUTH_FAILED
+ self._send_auth_result(username, method, result)
+ raise
+ result = AUTH_SUCCESSFUL
+ self.transport.server_object.check_auth_gssapi_keyex(
+ username, result
+ )
+ else:
+ result = self.transport.server_object.check_auth_none(username)
+ # okay, send result
+ self._send_auth_result(username, method, result)
+
+ def _parse_userauth_success(self, m):
+ self._log(
+ INFO, "Authentication ({}) successful!".format(self.auth_method)
+ )
+ self.authenticated = True
+ self.transport._auth_trigger()
+ if self.auth_event is not None:
+ self.auth_event.set()
+
+ def _parse_userauth_failure(self, m):
+ authlist = m.get_list()
+ # TODO 4.0: we aren't giving callers access to authlist _unless_ it's
+ # partial authentication, so eg authtype=none can't work unless we
+ # tweak this.
+ partial = m.get_boolean()
+ if partial:
+ self._log(INFO, "Authentication continues...")
+ self._log(DEBUG, "Methods: " + str(authlist))
+ self.transport.saved_exception = PartialAuthentication(authlist)
+ elif self.auth_method not in authlist:
+ for msg in (
+ "Authentication type ({}) not permitted.".format(
+ self.auth_method
+ ),
+ "Allowed methods: {}".format(authlist),
+ ):
+ self._log(DEBUG, msg)
+ self.transport.saved_exception = BadAuthenticationType(
+ "Bad authentication type", authlist
+ )
+ else:
+ self._log(
+ INFO, "Authentication ({}) failed.".format(self.auth_method)
+ )
+ self.authenticated = False
+ self.username = None
+ if self.auth_event is not None:
+ self.auth_event.set()
+
+ def _parse_userauth_banner(self, m):
+ banner = m.get_string()
+ self.banner = banner
+ self._log(INFO, "Auth banner: {}".format(banner))
+ # who cares.
+
+ def _parse_userauth_info_request(self, m):
+ if self.auth_method != "keyboard-interactive":
+ raise SSHException("Illegal info request from server")
+ title = m.get_text()
+ instructions = m.get_text()
+ m.get_binary() # lang
+ prompts = m.get_int()
+ prompt_list = []
+ for i in range(prompts):
+ prompt_list.append((m.get_text(), m.get_boolean()))
+ response_list = self.interactive_handler(
+ title, instructions, prompt_list
+ )
+
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_INFO_RESPONSE)
+ m.add_int(len(response_list))
+ for r in response_list:
+ m.add_string(r)
+ self.transport._send_message(m)
+
+ def _parse_userauth_info_response(self, m):
+ if not self.transport.server_mode:
+ raise SSHException("Illegal info response from server")
+ n = m.get_int()
+ responses = []
+ for i in range(n):
+ responses.append(m.get_text())
+ result = self.transport.server_object.check_auth_interactive_response(
+ responses
+ )
+ if isinstance(result, InteractiveQuery):
+ # make interactive query instead of response
+ self._interactive_query(result)
+ return
+ self._send_auth_result(
+ self.auth_username, "keyboard-interactive", result
+ )
+
+ def _handle_local_gss_failure(self, e):
+ self.transport.saved_exception = e
+ self._log(DEBUG, "GSSAPI failure: {}".format(e))
+ self._log(INFO, "Authentication ({}) failed.".format(self.auth_method))
+ self.authenticated = False
+ self.username = None
+ if self.auth_event is not None:
+ self.auth_event.set()
+ return
+
+ # TODO 4.0: MAY make sense to make these tables into actual
+ # classes/instances that can be fed a mode bool or whatever. Or,
+ # alternately (both?) make the message types small classes or enums that
+ # embed this info within themselves (which could also then tidy up the
+ # current 'integer -> human readable short string' stuff in common.py).
+ # TODO: if we do that, also expose 'em publicly.
+
+ # Messages which should be handled _by_ servers (sent by clients)
+ @property
+ def _server_handler_table(self):
+ return {
+ # TODO 4.0: MSG_SERVICE_REQUEST ought to eventually move into
+ # Transport's server mode like the client side did, just for
+ # consistency.
+ MSG_SERVICE_REQUEST: self._parse_service_request,
+ MSG_USERAUTH_REQUEST: self._parse_userauth_request,
+ MSG_USERAUTH_INFO_RESPONSE: self._parse_userauth_info_response,
+ }
+
+ # Messages which should be handled _by_ clients (sent by servers)
+ @property
+ def _client_handler_table(self):
+ return {
+ MSG_SERVICE_ACCEPT: self._parse_service_accept,
+ MSG_USERAUTH_SUCCESS: self._parse_userauth_success,
+ MSG_USERAUTH_FAILURE: self._parse_userauth_failure,
+ MSG_USERAUTH_BANNER: self._parse_userauth_banner,
+ MSG_USERAUTH_INFO_REQUEST: self._parse_userauth_info_request,
+ }
+
+ # NOTE: prior to the fix for #1283, this was a static dict instead of a
+ # property. Should be backwards compatible in most/all cases.
+ @property
+ def _handler_table(self):
+ if self.transport.server_mode:
+ return self._server_handler_table
+ else:
+ return self._client_handler_table
+
+
+class GssapiWithMicAuthHandler:
+ """A specialized Auth handler for gssapi-with-mic
+
+ During the GSSAPI token exchange we need a modified dispatch table,
+ because the packet type numbers are not unique.
+ """
+
+ method = "gssapi-with-mic"
+
+ def __init__(self, delegate, sshgss):
+ self._delegate = delegate
+ self.sshgss = sshgss
+
+ def abort(self):
+ self._restore_delegate_auth_handler()
+ return self._delegate.abort()
+
+ @property
+ def transport(self):
+ return self._delegate.transport
+
+ @property
+ def _send_auth_result(self):
+ return self._delegate._send_auth_result
+
+ @property
+ def auth_username(self):
+ return self._delegate.auth_username
+
+ @property
+ def gss_host(self):
+ return self._delegate.gss_host
+
+ def _restore_delegate_auth_handler(self):
+ self.transport.auth_handler = self._delegate
+
+ def _parse_userauth_gssapi_token(self, m):
+ client_token = m.get_string()
+ # use the client token as input to establish a secure
+ # context.
+ sshgss = self.sshgss
+ try:
+ token = sshgss.ssh_accept_sec_context(
+ self.gss_host, client_token, self.auth_username
+ )
+ except Exception as e:
+ self.transport.saved_exception = e
+ result = AUTH_FAILED
+ self._restore_delegate_auth_handler()
+ self._send_auth_result(self.auth_username, self.method, result)
+ raise
+ if token is not None:
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_GSSAPI_TOKEN)
+ m.add_string(token)
+ self.transport._expected_packet = (
+ MSG_USERAUTH_GSSAPI_TOKEN,
+ MSG_USERAUTH_GSSAPI_MIC,
+ MSG_USERAUTH_REQUEST,
+ )
+ self.transport._send_message(m)
+
+ def _parse_userauth_gssapi_mic(self, m):
+ mic_token = m.get_string()
+ sshgss = self.sshgss
+ username = self.auth_username
+ self._restore_delegate_auth_handler()
+ try:
+ sshgss.ssh_check_mic(
+ mic_token, self.transport.session_id, username
+ )
+ except Exception as e:
+ self.transport.saved_exception = e
+ result = AUTH_FAILED
+ self._send_auth_result(username, self.method, result)
+ raise
+ # TODO: Implement client credential saving.
+ # The OpenSSH server is able to create a TGT with the delegated
+ # client credentials, but this is not supported by GSS-API.
+ result = AUTH_SUCCESSFUL
+ self.transport.server_object.check_auth_gssapi_with_mic(
+ username, result
+ )
+ # okay, send result
+ self._send_auth_result(username, self.method, result)
+
+ def _parse_service_request(self, m):
+ self._restore_delegate_auth_handler()
+ return self._delegate._parse_service_request(m)
+
+ def _parse_userauth_request(self, m):
+ self._restore_delegate_auth_handler()
+ return self._delegate._parse_userauth_request(m)
+
+ __handler_table = {
+ MSG_SERVICE_REQUEST: _parse_service_request,
+ MSG_USERAUTH_REQUEST: _parse_userauth_request,
+ MSG_USERAUTH_GSSAPI_TOKEN: _parse_userauth_gssapi_token,
+ MSG_USERAUTH_GSSAPI_MIC: _parse_userauth_gssapi_mic,
+ }
+
+ @property
+ def _handler_table(self):
+ # TODO: determine if we can cut this up like we did for the primary
+ # AuthHandler class.
+ return self.__handler_table
+
+
+class AuthOnlyHandler(AuthHandler):
+ """
+ AuthHandler, and just auth, no service requests!
+
+ .. versionadded:: 3.2
+ """
+
+ # NOTE: this purposefully duplicates some of the parent class in order to
+ # modernize, refactor, etc. The intent is that eventually we will collapse
+ # this one onto the parent in a backwards incompatible release.
+
+ @property
+ def _client_handler_table(self):
+ my_table = super()._client_handler_table.copy()
+ del my_table[MSG_SERVICE_ACCEPT]
+ return my_table
+
+ def send_auth_request(self, username, method, finish_message=None):
+ """
+ Submit a userauth request message & wait for response.
+
+ Performs the transport message send call, sets self.auth_event, and
+ will lock-n-block as necessary to both send, and wait for response to,
+ the USERAUTH_REQUEST.
+
+ Most callers will want to supply a callback to ``finish_message``,
+ which accepts a Message ``m`` and may call mutator methods on it to add
+ more fields.
+ """
+ # Store a few things for reference in handlers, including auth failure
+ # handler (which needs to know if we were using a bad method, etc)
+ self.auth_method = method
+ self.username = username
+ # Generic userauth request fields
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_REQUEST)
+ m.add_string(username)
+ m.add_string("ssh-connection")
+ m.add_string(method)
+ # Caller usually has more to say, such as injecting password, key etc
+ finish_message(m)
+ # TODO 4.0: seems odd to have the client handle the lock and not
+ # Transport; that _may_ have been an artifact of allowing user
+ # threading event injection? Regardless, we don't want to move _this_
+ # locking into Transport._send_message now, because lots of other
+ # untouched code also uses that method and we might end up
+ # double-locking (?) but 4.0 would be a good time to revisit.
+ with self.transport.lock:
+ self.transport._send_message(m)
+ # We have cut out the higher level event args, but self.auth_event is
+ # still required for self.wait_for_response to function correctly (it's
+ # the mechanism used by the auth success/failure handlers, the abort
+        # handler, and a few other spots like in gssapi).
+ # TODO: interestingly, wait_for_response itself doesn't actually
+ # enforce that its event argument and self.auth_event are the same...
+ self.auth_event = threading.Event()
+ return self.wait_for_response(self.auth_event)
+
+ def auth_none(self, username):
+ return self.send_auth_request(username, "none")
+
+ def auth_publickey(self, username, key):
+ key_type, bits = self._get_key_type_and_bits(key)
+ algorithm = self._finalize_pubkey_algorithm(key_type)
+ blob = self._get_session_blob(
+ key,
+ "ssh-connection",
+ username,
+ algorithm,
+ )
+
+ def finish(m):
+ # This field doesn't appear to be named, but is False when querying
+ # for permission (ie knowing whether to even prompt a user for
+ # passphrase, etc) or True when just going for it. Paramiko has
+ # never bothered with the former type of message, apparently.
+ m.add_boolean(True)
+ m.add_string(algorithm)
+ m.add_string(bits)
+ m.add_string(key.sign_ssh_data(blob, algorithm))
+
+ return self.send_auth_request(username, "publickey", finish)
+
+ def auth_password(self, username, password):
+ def finish(m):
+ # Unnamed field that equates to "I am changing my password", which
+ # Paramiko clientside never supported and serverside only sort of
+ # supported.
+ m.add_boolean(False)
+ m.add_string(b(password))
+
+ return self.send_auth_request(username, "password", finish)
+
+ def auth_interactive(self, username, handler, submethods=""):
+ """
+ response_list = handler(title, instructions, prompt_list)
+ """
+ # Unlike most siblings, this auth method _does_ require other
+ # superclass handlers (eg userauth info request) to understand
+ # what's going on, so we still set some self attributes.
+ self.auth_method = "keyboard_interactive"
+ self.interactive_handler = handler
+
+ def finish(m):
+ # Empty string for deprecated language tag field, per RFC 4256:
+ # https://www.rfc-editor.org/rfc/rfc4256#section-3.1
+ m.add_string("")
+ m.add_string(submethods)
+
+ return self.send_auth_request(username, "keyboard-interactive", finish)
+
+ # NOTE: not strictly 'auth only' related, but allows users to opt-in.
+ def _choose_fallback_pubkey_algorithm(self, key_type, my_algos):
+ msg = "Server did not send a server-sig-algs list; defaulting to something in our preferred algorithms list" # noqa
+ self._log(DEBUG, msg)
+ noncert_key_type = key_type.replace("-cert-v01@openssh.com", "")
+ if key_type in my_algos or noncert_key_type in my_algos:
+ actual = key_type if key_type in my_algos else noncert_key_type
+ msg = f"Current key type, {actual!r}, is in our preferred list; using that" # noqa
+ algo = actual
+ else:
+ algo = my_algos[0]
+ msg = f"{key_type!r} not in our list - trying first list item instead, {algo!r}" # noqa
+ self._log(DEBUG, msg)
+ return algo
diff --git a/paramiko/auth_strategy.py b/paramiko/auth_strategy.py
new file mode 100644
index 0000000..03c1d87
--- /dev/null
+++ b/paramiko/auth_strategy.py
@@ -0,0 +1,306 @@
+"""
+Modern, adaptable authentication machinery.
+
+Replaces certain parts of `.SSHClient`. For a concrete implementation, see the
+``OpenSSHAuthStrategy`` class in `Fabric <https://fabfile.org>`_.
+"""
+
+from collections import namedtuple
+
+from .agent import AgentKey
+from .util import get_logger
+from .ssh_exception import AuthenticationException
+
+
+class AuthSource:
+ """
+ Some SSH authentication source, such as a password, private key, or agent.
+
+ See subclasses in this module for concrete implementations.
+
+ All implementations must accept at least a ``username`` (``str``) kwarg.
+ """
+
+ def __init__(self, username):
+ self.username = username
+
+ def _repr(self, **kwargs):
+ # TODO: are there any good libs for this? maybe some helper from
+ # structlog?
+ pairs = [f"{k}={v!r}" for k, v in kwargs.items()]
+ joined = ", ".join(pairs)
+ return f"{self.__class__.__name__}({joined})"
+
+ def __repr__(self):
+ return self._repr()
+
+ def authenticate(self, transport):
+ """
+ Perform authentication.
+ """
+ raise NotImplementedError
+
+
+class NoneAuth(AuthSource):
+ """
+ Auth type "none", ie https://www.rfc-editor.org/rfc/rfc4252#section-5.2 .
+ """
+
+ def authenticate(self, transport):
+ return transport.auth_none(self.username)
+
+
+class Password(AuthSource):
+ """
+ Password authentication.
+
+ :param callable password_getter:
+ A lazy callable that should return a `str` password value at
+ authentication time, such as a `functools.partial` wrapping
+ `getpass.getpass`, an API call to a secrets store, or similar.
+
+ If you already know the password at instantiation time, you should
+ simply use something like ``lambda: "my literal"`` (for a literal, but
+ also, shame on you!) or ``lambda: variable_name`` (for something stored
+ in a variable).
+ """
+
+ def __init__(self, username, password_getter):
+ super().__init__(username=username)
+ self.password_getter = password_getter
+
+ def __repr__(self):
+ # Password auth is marginally more 'username-caring' than pkeys, so may
+ # as well log that info here.
+ return super()._repr(user=self.username)
+
+ def authenticate(self, transport):
+ # Lazily get the password, in case it's prompting a user
+ # TODO: be nice to log source _of_ the password?
+ password = self.password_getter()
+ return transport.auth_password(self.username, password)
+
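+# Illustrative sketch (not part of the original patch): building a Password
+# source with a lazy getter, as the docstring above suggests. The username
+# and prompt text are hypothetical.
+#
+#   from functools import partial
+#   from getpass import getpass
+#
+#   source = Password("my-user", partial(getpass, "Password for my-user: "))
+#   # source.authenticate(transport) is normally driven by an AuthStrategy.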
+
+# TODO 4.0: twiddle this, or PKey, or both, so they're more obviously distinct.
+# TODO 4.0: the obvious is to make this more wordy (PrivateKeyAuth), the
+# minimalist approach might be to rename PKey to just Key (esp given all the
+# subclasses are WhateverKey and not WhateverPKey)
+class PrivateKey(AuthSource):
+ """
+ Essentially a mixin for private keys.
+
+ Knows how to auth, but leaves key material discovery/loading/decryption to
+ subclasses.
+
+ Subclasses **must** ensure that they've set ``self.pkey`` to a decrypted
+ `.PKey` instance before calling ``super().authenticate``; typically
+ either in their ``__init__``, or in an overridden ``authenticate`` prior to
+ its `super` call.
+ """
+
+ def authenticate(self, transport):
+ return transport.auth_publickey(self.username, self.pkey)
+
+
+class InMemoryPrivateKey(PrivateKey):
+ """
+ An in-memory, decrypted `.PKey` object.
+ """
+
+ def __init__(self, username, pkey):
+ super().__init__(username=username)
+ # No decryption (presumably) necessary!
+ self.pkey = pkey
+
+ def __repr__(self):
+ # NOTE: most of interesting repr-bits for private keys is in PKey.
+ # TODO: tacking on agent-ness like this is a bit awkward, but, eh?
+ rep = super()._repr(pkey=self.pkey)
+ if isinstance(self.pkey, AgentKey):
+ rep += " [agent]"
+ return rep
+
+
+class OnDiskPrivateKey(PrivateKey):
+ """
+ Some on-disk private key that needs opening and possibly decrypting.
+
+ :param str source:
+ String tracking where this key's path was specified; should be one of
+ ``"ssh-config"``, ``"python-config"``, or ``"implicit-home"``.
+ :param Path path:
+ The filesystem path this key was loaded from.
+ :param PKey pkey:
+ The `PKey` object this auth source uses/represents.
+ """
+
+ def __init__(self, username, source, path, pkey):
+ super().__init__(username=username)
+ self.source = source
+ allowed = ("ssh-config", "python-config", "implicit-home")
+ if source not in allowed:
+ raise ValueError(f"source argument must be one of: {allowed!r}")
+ self.path = path
+ # Superclass wants .pkey, other two are mostly for display/debugging.
+ self.pkey = pkey
+
+ def __repr__(self):
+ return self._repr(
+ key=self.pkey, source=self.source, path=str(self.path)
+ )
+
+
+# TODO re sources: is there anything in an OpenSSH config file that doesn't fit
+# into what Paramiko already had kwargs for?
+
+
+SourceResult = namedtuple("SourceResult", ["source", "result"])
+
+# TODO: tempting to make this an OrderedDict, except the keys essentially want
+# to be rich objects (AuthSources) which do not make for useful user indexing?
+# TODO: members being vanilla tuples is pretty old-school/expedient; they
+# "really" want to be something that's type friendlier (unless the tuple's 2nd
+# member being a Union of two types is "fine"?), which I assume means yet more
+# classes, eg an abstract SourceResult with concrete AuthSuccess and
+# AuthFailure children?
+# TODO: arguably we want __init__ typechecking of the members (or to leverage
+# mypy by classifying this literally as list-of-AuthSource?)
+class AuthResult(list):
+ """
+ Represents a partial or complete SSH authentication attempt.
+
+ This class conceptually extends `AuthStrategy` by pairing the former's
+ authentication **sources** with the **results** of trying to authenticate
+ with them.
+
+ `AuthResult` is a (subclass of) `list` of `namedtuple`, which are of the
+ form ``namedtuple('SourceResult', 'source', 'result')`` (where the
+ ``source`` member is an `AuthSource` and the ``result`` member is either a
+ return value from the relevant `.Transport` method, or an exception
+ object).
+
+ .. note::
+ Transport auth method results are always themselves a ``list`` of "next
+ allowable authentication methods".
+
+ In the simple case of "you just authenticated successfully", it's an
+ empty list; if your auth was rejected but you're allowed to try again,
+ it will be a list of string method names like ``pubkey`` or
+ ``password``.
+
+ The ``__str__`` of this class represents the empty-list scenario as the
+ word ``success``, which should make reading the result of an
+ authentication session more obvious to humans.
+
+ Instances also have a `strategy` attribute referencing the `AuthStrategy`
+ which was attempted.
+ """
+
+ def __init__(self, strategy, *args, **kwargs):
+ self.strategy = strategy
+ super().__init__(*args, **kwargs)
+
+ def __str__(self):
+ # NOTE: meaningfully distinct from __repr__, which still wants to use
+ # superclass' implementation.
+ # TODO: go hog wild, use rich.Table? how is that on degraded term's?
+ # TODO: test this lol
+ return "\n".join(
+ f"{x.source} -> {x.result or 'success'}" for x in self
+ )
+
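+# Illustrative sketch (not part of the original patch): inspecting an
+# AuthResult returned by AuthStrategy.authenticate (``result`` is
+# hypothetical here).
+#
+#   for source, outcome in result:
+#       # outcome is either the Transport return value (a list of "next
+#       # allowed methods", empty on success) or an exception instance.
+#       print(source, outcome)
+#   print(result)  # __str__ renders empty lists as the word "success"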
+
+# TODO 4.0: descend from SSHException or even just Exception
+class AuthFailure(AuthenticationException):
+ """
+ Basic exception wrapping an `AuthResult` indicating overall auth failure.
+
+ Note that `AuthFailure` descends from `AuthenticationException` but is
+ generally "higher level"; the latter is now only raised by individual
+ `AuthSource` attempts and should typically only be seen by users when
+ encapsulated in this class. It subclasses `AuthenticationException`
+ primarily for backwards compatibility reasons.
+ """
+
+ def __init__(self, result):
+ self.result = result
+
+ def __str__(self):
+ return "\n" + str(self.result)
+
+
+class AuthStrategy:
+ """
+ This class represents one or more attempts to auth with an SSH server.
+
+ By default, subclasses must at least accept an ``ssh_config``
+ (`.SSHConfig`) keyword argument, but may opt to accept more as needed for
+ their particular strategy.
+ """
+
+ def __init__(
+ self,
+ ssh_config,
+ ):
+ self.ssh_config = ssh_config
+ self.log = get_logger(__name__)
+
+ def get_sources(self):
+ """
+ Generator yielding `AuthSource` instances, in the order to try.
+
+ This is the primary override point for subclasses: you figure out what
+ sources you need, and ``yield`` them.
+
+ Subclasses _of_ subclasses may find themselves wanting to do things
+ like filtering or discarding around a call to `super`.
+ """
+ raise NotImplementedError
+
+ def authenticate(self, transport):
+ """
+ Handles attempting `AuthSource` instances yielded from `get_sources`.
+
+ You *normally* won't need to override this, but it's an option for
+ advanced users.
+ """
+ succeeded = False
+ overall_result = AuthResult(strategy=self)
+ # TODO: arguably we could fit in a "send none auth, record allowed auth
+ # types sent back" thing here as OpenSSH-client does, but that likely
+ # wants to live in fabric.OpenSSHAuthStrategy as not all target servers
+ # will implement it!
+ # TODO: needs better "server told us too many attempts" checking!
+ for source in self.get_sources():
+ self.log.debug(f"Trying {source}")
+ try: # NOTE: this really wants to _only_ wrap the authenticate()!
+ result = source.authenticate(transport)
+ succeeded = True
+ # TODO: 'except PartialAuthentication' is needed for 2FA and
+ # similar, as per old SSHClient.connect - it is the only way
+ # AuthHandler supplies access to the 'name-list' field from
+ # MSG_USERAUTH_FAILURE, at present.
+ except Exception as e:
+ result = e
+ # TODO: look at what this could possibly raise, we don't really
+ # want Exception here, right? just SSHException subclasses? or
+ # do we truly want to capture anything at all with assumption
+ # it's easy enough for users to look afterwards?
+                # NOTE: showing type, not message, for brevity & also most of
+ # the time it's basically just "Authentication failed."
+ source_class = e.__class__.__name__
+ self.log.info(
+ f"Authentication via {source} failed with {source_class}"
+ )
+ overall_result.append(SourceResult(source, result))
+ if succeeded:
+ break
+ # Gotta die here if nothing worked, otherwise Transport's main loop
+ # just kinda hangs out until something times out!
+ if not succeeded:
+ raise AuthFailure(result=overall_result)
+ # Success: give back what was done, in case they care.
+ return overall_result
+
+ # TODO: is there anything OpenSSH client does which _can't_ cleanly map to
+ # iterating a generator?
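+
+
+# Illustrative sketch (not part of the original patch): a minimal concrete
+# strategy yielding a single in-memory key. The names below are hypothetical.
+#
+#   class SingleKeyStrategy(AuthStrategy):
+#       def __init__(self, ssh_config, username, pkey):
+#           super().__init__(ssh_config=ssh_config)
+#           self.username = username
+#           self.pkey = pkey
+#
+#       def get_sources(self):
+#           yield InMemoryPrivateKey(username=self.username, pkey=self.pkey)
+#
+#   # Usage: SingleKeyStrategy(cfg, "user", key).authenticate(transport)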
diff --git a/paramiko/ber.py b/paramiko/ber.py
new file mode 100644
index 0000000..b8287f5
--- /dev/null
+++ b/paramiko/ber.py
@@ -0,0 +1,139 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+from paramiko.common import max_byte, zero_byte, byte_ord, byte_chr
+
+import paramiko.util as util
+from paramiko.util import b
+from paramiko.sftp import int64
+
+
+class BERException(Exception):
+ pass
+
+
+class BER:
+ """
+ Robey's tiny little attempt at a BER decoder.
+ """
+
+ def __init__(self, content=bytes()):
+ self.content = b(content)
+ self.idx = 0
+
+ def asbytes(self):
+ return self.content
+
+ def __str__(self):
+ return self.asbytes()
+
+ def __repr__(self):
+ return "BER('" + repr(self.content) + "')"
+
+ def decode(self):
+ return self.decode_next()
+
+ def decode_next(self):
+ if self.idx >= len(self.content):
+ return None
+ ident = byte_ord(self.content[self.idx])
+ self.idx += 1
+ if (ident & 31) == 31:
+ # identifier > 30
+ ident = 0
+ while self.idx < len(self.content):
+ t = byte_ord(self.content[self.idx])
+ self.idx += 1
+ ident = (ident << 7) | (t & 0x7F)
+ if not (t & 0x80):
+ break
+ if self.idx >= len(self.content):
+ return None
+ # now fetch length
+ size = byte_ord(self.content[self.idx])
+ self.idx += 1
+ if size & 0x80:
+            # more complicated...
+ # FIXME: theoretically should handle indefinite-length (0x80)
+ t = size & 0x7F
+ if self.idx + t > len(self.content):
+ return None
+ size = util.inflate_long(
+ self.content[self.idx : self.idx + t], True
+ )
+ self.idx += t
+ if self.idx + size > len(self.content):
+ # can't fit
+ return None
+ data = self.content[self.idx : self.idx + size]
+ self.idx += size
+ # now switch on id
+ if ident == 0x30:
+ # sequence
+ return self.decode_sequence(data)
+ elif ident == 2:
+ # int
+ return util.inflate_long(data)
+ else:
+ # 1: boolean (00 false, otherwise true)
+ msg = "Unknown ber encoding type {:d} (robey is lazy)"
+ raise BERException(msg.format(ident))
+
+ @staticmethod
+ def decode_sequence(data):
+ out = []
+ ber = BER(data)
+ while True:
+ x = ber.decode_next()
+ if x is None:
+ break
+ out.append(x)
+ return out
+
+ def encode_tlv(self, ident, val):
+ # no need to support ident > 31 here
+ self.content += byte_chr(ident)
+ if len(val) > 0x7F:
+ lenstr = util.deflate_long(len(val))
+ self.content += byte_chr(0x80 + len(lenstr)) + lenstr
+ else:
+ self.content += byte_chr(len(val))
+ self.content += val
+
+ def encode(self, x):
+ if type(x) is bool:
+ if x:
+ self.encode_tlv(1, max_byte)
+ else:
+ self.encode_tlv(1, zero_byte)
+ elif (type(x) is int) or (type(x) is int64):
+ self.encode_tlv(2, util.deflate_long(x))
+ elif type(x) is str:
+ self.encode_tlv(4, x)
+ elif (type(x) is list) or (type(x) is tuple):
+ self.encode_tlv(0x30, self.encode_sequence(x))
+ else:
+ raise BERException(
+ "Unknown type for encoding: {!r}".format(type(x))
+ )
+
+ @staticmethod
+ def encode_sequence(data):
+ ber = BER()
+ for item in data:
+ ber.encode(item)
+ return ber.asbytes()
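A quick sanity check of the helper above (illustrative, not part of the patch): encode_sequence() concatenates one TLV per item and decode_sequence() walks them back out, while encoding a list through encode() additionally wraps everything in the outer SEQUENCE (0x30) header.

from paramiko.ber import BER

blob = BER.encode_sequence([1, 65537])          # two INTEGER TLVs, back to back
assert BER.decode_sequence(blob) == [1, 65537]

outer = BER()
outer.encode([1, 65537])                        # adds the 0x30 SEQUENCE wrapper
assert BER(outer.asbytes()).decode() == [1, 65537]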
diff --git a/paramiko/buffered_pipe.py b/paramiko/buffered_pipe.py
new file mode 100644
index 0000000..c19279c
--- /dev/null
+++ b/paramiko/buffered_pipe.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Attempt to generalize the "feeder" part of a `.Channel`: an object which can be
+read from and closed, but is reading from a buffer fed by another thread. The
+read operations are blocking and can have a timeout set.
+"""
+
+import array
+import threading
+import time
+from paramiko.util import b
+
+
+class PipeTimeout(IOError):
+ """
+ Indicates that a timeout was reached on a read from a `.BufferedPipe`.
+ """
+
+ pass
+
+
+class BufferedPipe:
+ """
+ A buffer that obeys normal read (with timeout) & close semantics for a
+ file or socket, but is fed data from another thread. This is used by
+ `.Channel`.
+ """
+
+ def __init__(self):
+ self._lock = threading.Lock()
+ self._cv = threading.Condition(self._lock)
+ self._event = None
+ self._buffer = array.array("B")
+ self._closed = False
+
+ def _buffer_frombytes(self, data):
+ self._buffer.frombytes(data)
+
+ def _buffer_tobytes(self, limit=None):
+ return self._buffer[:limit].tobytes()
+
+ def set_event(self, event):
+ """
+ Set an event on this buffer. When data is ready to be read (or the
+ buffer has been closed), the event will be set. When no data is
+ ready, the event will be cleared.
+
+ :param threading.Event event: the event to set/clear
+ """
+ self._lock.acquire()
+ try:
+ self._event = event
+ # Make sure the event starts in `set` state if we appear to already
+ # be closed; otherwise, if we start in `clear` state & are closed,
+ # nothing will ever call `.feed` and the event (& OS pipe, if we're
+ # wrapping one - see `Channel.fileno`) will permanently stay in
+ # `clear`, causing deadlock if e.g. `select`ed upon.
+ if self._closed or len(self._buffer) > 0:
+ event.set()
+ else:
+ event.clear()
+ finally:
+ self._lock.release()
+
+ def feed(self, data):
+ """
+ Feed new data into this pipe. This method is assumed to be called
+ from a separate thread, so synchronization is done.
+
+ :param data: the data to add, as a ``str`` or ``bytes``
+ """
+ self._lock.acquire()
+ try:
+ if self._event is not None:
+ self._event.set()
+ self._buffer_frombytes(b(data))
+ self._cv.notify_all()
+ finally:
+ self._lock.release()
+
+ def read_ready(self):
+ """
+ Returns true if data is buffered and ready to be read from this
+ feeder. A ``False`` result does not mean that the feeder has closed;
+ it means you may need to wait before more data arrives.
+
+ :return:
+ ``True`` if a `read` call would immediately return at least one
+ byte; ``False`` otherwise.
+ """
+ self._lock.acquire()
+ try:
+ if len(self._buffer) == 0:
+ return False
+ return True
+ finally:
+ self._lock.release()
+
+ def read(self, nbytes, timeout=None):
+ """
+ Read data from the pipe. The return value is a string representing
+ the data received. The maximum amount of data to be received at once
+ is specified by ``nbytes``. If a string of length zero is returned,
+ the pipe has been closed.
+
+ The optional ``timeout`` argument can be a nonnegative float expressing
+ seconds, or ``None`` for no timeout. If a float is given, a
+ `.PipeTimeout` will be raised if the timeout period value has elapsed
+ before any data arrives.
+
+ :param int nbytes: maximum number of bytes to read
+ :param float timeout:
+ maximum seconds to wait (or ``None``, the default, to wait forever)
+ :return: the read data, as a ``str`` or ``bytes``
+
+ :raises:
+ `.PipeTimeout` -- if a timeout was specified and no data was ready
+ before that timeout
+ """
+ out = bytes()
+ self._lock.acquire()
+ try:
+ if len(self._buffer) == 0:
+ if self._closed:
+ return out
+ # should we block?
+ if timeout == 0.0:
+ raise PipeTimeout()
+ # loop here in case we get woken up but a different thread has
+ # grabbed everything in the buffer.
+ while (len(self._buffer) == 0) and not self._closed:
+ then = time.time()
+ self._cv.wait(timeout)
+ if timeout is not None:
+ timeout -= time.time() - then
+ if timeout <= 0.0:
+ raise PipeTimeout()
+
+ # something's in the buffer and we have the lock!
+ if len(self._buffer) <= nbytes:
+ out = self._buffer_tobytes()
+ del self._buffer[:]
+ if (self._event is not None) and not self._closed:
+ self._event.clear()
+ else:
+ out = self._buffer_tobytes(nbytes)
+ del self._buffer[:nbytes]
+ finally:
+ self._lock.release()
+
+ return out
+
+ def empty(self):
+ """
+ Clear out the buffer and return all data that was in it.
+
+ :return:
+ any data that was in the buffer prior to clearing it out, as a
+ `str`
+ """
+ self._lock.acquire()
+ try:
+ out = self._buffer_tobytes()
+ del self._buffer[:]
+ if (self._event is not None) and not self._closed:
+ self._event.clear()
+ return out
+ finally:
+ self._lock.release()
+
+ def close(self):
+ """
+ Close this pipe object. Future calls to `read` after the buffer
+ has been emptied will return immediately with an empty string.
+ """
+ self._lock.acquire()
+ try:
+ self._closed = True
+ self._cv.notify_all()
+ if self._event is not None:
+ self._event.set()
+ finally:
+ self._lock.release()
+
+ def __len__(self):
+ """
+ Return the number of bytes buffered.
+
+ :return: number (`int`) of bytes buffered
+ """
+ self._lock.acquire()
+ try:
+ return len(self._buffer)
+ finally:
+ self._lock.release()
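An illustrative producer/consumer sketch for the class above (not part of the patch): one thread feeds bytes in, the main thread does blocking reads with a timeout and stops on the zero-length read that signals a closed, drained pipe.

import threading
from paramiko.buffered_pipe import BufferedPipe, PipeTimeout

pipe = BufferedPipe()

def producer():
    pipe.feed(b"hello ")
    pipe.feed(b"world")
    pipe.close()                      # reads return b"" once the buffer drains

threading.Thread(target=producer).start()

chunks = []
while True:
    try:
        data = pipe.read(16, timeout=1.0)
    except PipeTimeout:
        break                         # nothing arrived within a second
    if not data:                      # closed and fully drained
        break
    chunks.append(data)
assert b"".join(chunks) == b"hello world"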
diff --git a/paramiko/channel.py b/paramiko/channel.py
new file mode 100644
index 0000000..2757450
--- /dev/null
+++ b/paramiko/channel.py
@@ -0,0 +1,1390 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Abstraction for an SSH2 channel.
+"""
+
+import binascii
+import os
+import socket
+import time
+import threading
+
+from functools import wraps
+
+from paramiko import util
+from paramiko.common import (
+ cMSG_CHANNEL_REQUEST,
+ cMSG_CHANNEL_WINDOW_ADJUST,
+ cMSG_CHANNEL_DATA,
+ cMSG_CHANNEL_EXTENDED_DATA,
+ DEBUG,
+ ERROR,
+ cMSG_CHANNEL_SUCCESS,
+ cMSG_CHANNEL_FAILURE,
+ cMSG_CHANNEL_EOF,
+ cMSG_CHANNEL_CLOSE,
+)
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+from paramiko.file import BufferedFile
+from paramiko.buffered_pipe import BufferedPipe, PipeTimeout
+from paramiko import pipe
+from paramiko.util import ClosingContextManager
+
+
+def open_only(func):
+ """
+ Decorator for `.Channel` methods which performs an openness check.
+
+ :raises:
+ `.SSHException` -- If the wrapped method is called on an unopened
+ `.Channel`.
+ """
+
+ @wraps(func)
+ def _check(self, *args, **kwds):
+ if (
+ self.closed
+ or self.eof_received
+ or self.eof_sent
+ or not self.active
+ ):
+ raise SSHException("Channel is not open")
+ return func(self, *args, **kwds)
+
+ return _check
+
+
+class Channel(ClosingContextManager):
+ """
+ A secure tunnel across an SSH `.Transport`. A Channel is meant to behave
+ like a socket, and has an API that should be indistinguishable from the
+ Python socket API.
+
+ Because SSH2 has a windowing kind of flow control, if you stop reading data
+ from a Channel and its buffer fills up, the server will be unable to send
+ you any more data until you read some of it. (This won't affect other
+ channels on the same transport -- all channels on a single transport are
+ flow-controlled independently.) Similarly, if the server isn't reading
+ data you send, calls to `send` may block, unless you set a timeout. This
+ is exactly like a normal network socket, so it shouldn't be too surprising.
+
+ Instances of this class may be used as context managers.
+ """
+
+ def __init__(self, chanid):
+ """
+ Create a new channel. The channel is not associated with any
+ particular session or `.Transport` until the Transport attaches it.
+ Normally you would only call this method from the constructor of a
+ subclass of `.Channel`.
+
+ :param int chanid:
+ the ID of this channel, as passed by an existing `.Transport`.
+ """
+ #: Channel ID
+ self.chanid = chanid
+ #: Remote channel ID
+ self.remote_chanid = 0
+ #: `.Transport` managing this channel
+ self.transport = None
+ #: Whether the connection is presently active
+ self.active = False
+ self.eof_received = 0
+ self.eof_sent = 0
+ self.in_buffer = BufferedPipe()
+ self.in_stderr_buffer = BufferedPipe()
+ self.timeout = None
+ #: Whether the connection has been closed
+ self.closed = False
+ self.ultra_debug = False
+ self.lock = threading.Lock()
+ self.out_buffer_cv = threading.Condition(self.lock)
+ self.in_window_size = 0
+ self.out_window_size = 0
+ self.in_max_packet_size = 0
+ self.out_max_packet_size = 0
+ self.in_window_threshold = 0
+ self.in_window_sofar = 0
+ self.status_event = threading.Event()
+ self._name = str(chanid)
+ self.logger = util.get_logger("paramiko.transport")
+ self._pipe = None
+ self.event = threading.Event()
+ self.event_ready = False
+ self.combine_stderr = False
+ self.exit_status = -1
+ self.origin_addr = None
+
+ def __del__(self):
+ try:
+ self.close()
+ except:
+ pass
+
+ def __repr__(self):
+ """
+ Return a string representation of this object, for debugging.
+ """
+ out = "<paramiko.Channel {}".format(self.chanid)
+ if self.closed:
+ out += " (closed)"
+ elif self.active:
+ if self.eof_received:
+ out += " (EOF received)"
+ if self.eof_sent:
+ out += " (EOF sent)"
+ out += " (open) window={}".format(self.out_window_size)
+ if len(self.in_buffer) > 0:
+ out += " in-buffer={}".format(len(self.in_buffer))
+ out += " -> " + repr(self.transport)
+ out += ">"
+ return out
+
+ @open_only
+ def get_pty(
+ self,
+ term="vt100",
+ width=80,
+ height=24,
+ width_pixels=0,
+ height_pixels=0,
+ ):
+ """
+ Request a pseudo-terminal from the server. This is usually used right
+ after creating a client channel, to ask the server to provide some
+ basic terminal semantics for a shell invoked with `invoke_shell`.
+ It isn't necessary (or desirable) to call this method if you're going
+ to execute a single command with `exec_command`.
+
+ :param str term: the terminal type to emulate
+ (for example, ``'vt100'``)
+ :param int width: width (in characters) of the terminal screen
+ :param int height: height (in characters) of the terminal screen
+ :param int width_pixels: width (in pixels) of the terminal screen
+ :param int height_pixels: height (in pixels) of the terminal screen
+
+ :raises:
+ `.SSHException` -- if the request was rejected or the channel was
+ closed
+ """
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_REQUEST)
+ m.add_int(self.remote_chanid)
+ m.add_string("pty-req")
+ m.add_boolean(True)
+ m.add_string(term)
+ m.add_int(width)
+ m.add_int(height)
+ m.add_int(width_pixels)
+ m.add_int(height_pixels)
+ m.add_string(bytes())
+ self._event_pending()
+ self.transport._send_user_message(m)
+ self._wait_for_event()
+
+ @open_only
+ def invoke_shell(self):
+ """
+ Request an interactive shell session on this channel. If the server
+ allows it, the channel will then be directly connected to the stdin,
+ stdout, and stderr of the shell.
+
+ Normally you would call `get_pty` before this, in which case the
+ shell will operate through the pty, and the channel will be connected
+ to the stdin and stdout of the pty.
+
+ When the shell exits, the channel will be closed and can't be reused.
+ You must open a new channel if you wish to open another shell.
+
+ :raises:
+ `.SSHException` -- if the request was rejected or the channel was
+ closed
+ """
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_REQUEST)
+ m.add_int(self.remote_chanid)
+ m.add_string("shell")
+ m.add_boolean(True)
+ self._event_pending()
+ self.transport._send_user_message(m)
+ self._wait_for_event()
+
+ @open_only
+ def exec_command(self, command):
+ """
+ Execute a command on the server. If the server allows it, the channel
+ will then be directly connected to the stdin, stdout, and stderr of
+ the command being executed.
+
+ When the command finishes executing, the channel will be closed and
+ can't be reused. You must open a new channel if you wish to execute
+ another command.
+
+ :param str command: a shell command to execute.
+
+ :raises:
+ `.SSHException` -- if the request was rejected or the channel was
+ closed
+ """
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_REQUEST)
+ m.add_int(self.remote_chanid)
+ m.add_string("exec")
+ m.add_boolean(True)
+ m.add_string(command)
+ self._event_pending()
+ self.transport._send_user_message(m)
+ self._wait_for_event()
+
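For orientation, a hedged sketch of driving exec_command() directly on a raw channel obtained from an already-connected Transport (the host name and credentials are placeholders; most callers would go through the higher-level SSHClient shown later in this patch).

import paramiko

transport = paramiko.Transport(("host.example.com", 22))   # placeholder address
transport.connect(username="user", password="secret")      # placeholder creds
chan = transport.open_session()
chan.exec_command("uname -a")

output = b""
while True:
    data = chan.recv(4096)
    if not data:                  # zero-length read: the stream has closed
        break
    output += data
print(output.decode(), chan.recv_exit_status())
transport.close()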
+ @open_only
+ def invoke_subsystem(self, subsystem):
+ """
+ Request a subsystem on the server (for example, ``sftp``). If the
+ server allows it, the channel will then be directly connected to the
+ requested subsystem.
+
+ When the subsystem finishes, the channel will be closed and can't be
+ reused.
+
+ :param str subsystem: name of the subsystem being requested.
+
+ :raises:
+ `.SSHException` -- if the request was rejected or the channel was
+ closed
+ """
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_REQUEST)
+ m.add_int(self.remote_chanid)
+ m.add_string("subsystem")
+ m.add_boolean(True)
+ m.add_string(subsystem)
+ self._event_pending()
+ self.transport._send_user_message(m)
+ self._wait_for_event()
+
+ @open_only
+ def resize_pty(self, width=80, height=24, width_pixels=0, height_pixels=0):
+ """
+ Resize the pseudo-terminal. This can be used to change the width and
+ height of the terminal emulation created in a previous `get_pty` call.
+
+ :param int width: new width (in characters) of the terminal screen
+ :param int height: new height (in characters) of the terminal screen
+ :param int width_pixels: new width (in pixels) of the terminal screen
+ :param int height_pixels: new height (in pixels) of the terminal screen
+
+ :raises:
+ `.SSHException` -- if the request was rejected or the channel was
+ closed
+ """
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_REQUEST)
+ m.add_int(self.remote_chanid)
+ m.add_string("window-change")
+ m.add_boolean(False)
+ m.add_int(width)
+ m.add_int(height)
+ m.add_int(width_pixels)
+ m.add_int(height_pixels)
+ self.transport._send_user_message(m)
+
+ @open_only
+ def update_environment(self, environment):
+ """
+ Updates this channel's remote shell environment.
+
+ .. note::
+ This operation is additive - i.e. the current environment is not
+ reset before the given environment variables are set.
+
+ .. warning::
+ Servers may silently reject some environment variables; see the
+ warning in `set_environment_variable` for details.
+
+ :param dict environment:
+ a dictionary containing the name and respective values to set
+ :raises:
+ `.SSHException` -- if any of the environment variables was rejected
+ by the server or the channel was closed
+ """
+ for name, value in environment.items():
+ try:
+ self.set_environment_variable(name, value)
+ except SSHException as e:
+ err = 'Failed to set environment variable "{}".'
+ raise SSHException(err.format(name), e)
+
+ @open_only
+ def set_environment_variable(self, name, value):
+ """
+ Set the value of an environment variable.
+
+ .. warning::
+ The server may reject this request depending on its ``AcceptEnv``
+ setting; such rejections will fail silently (which is common client
+ practice for this particular request type). Make sure you
+ understand your server's configuration before using!
+
+ :param str name: name of the environment variable
+ :param str value: value of the environment variable
+
+ :raises:
+ `.SSHException` -- if the request was rejected or the channel was
+ closed
+ """
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_REQUEST)
+ m.add_int(self.remote_chanid)
+ m.add_string("env")
+ m.add_boolean(False)
+ m.add_string(name)
+ m.add_string(value)
+ self.transport._send_user_message(m)
+
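As a usage note (illustrative, not part of the patch; chan is assumed to be an open session channel): both environment calls below are sent without a reply request, so a server whose AcceptEnv list does not include these names will simply ignore them.

chan.update_environment({"LANG": "C.UTF-8", "DEPLOY_ENV": "staging"})  # example names
chan.set_environment_variable("LC_ALL", "C.UTF-8")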
+ def exit_status_ready(self):
+ """
+ Return true if the remote process has exited and returned an exit
+ status. You may use this to poll the process status if you don't
+ want to block in `recv_exit_status`. Note that the server may not
+ return an exit status in some cases (like bad servers).
+
+ :return:
+ ``True`` if `recv_exit_status` will return immediately, else
+ ``False``.
+
+ .. versionadded:: 1.7.3
+ """
+ return self.closed or self.status_event.is_set()
+
+ def recv_exit_status(self):
+ """
+ Return the exit status from the process on the server. This is
+ mostly useful for retrieving the results of an `exec_command`.
+ If the command hasn't finished yet, this method will wait until
+ it does, or until the channel is closed. If no exit status is
+ provided by the server, -1 is returned.
+
+ .. warning::
+ In some situations, receiving remote output larger than the current
+ `.Transport` or session's ``window_size`` (e.g. that set by the
+ ``default_window_size`` kwarg for `.Transport.__init__`) will cause
+ `.recv_exit_status` to hang indefinitely if it is called prior to a
+ sufficiently large `.Channel.recv` (or if there are no threads
+ calling `.Channel.recv` in the background).
+
+ In these cases, ensuring that `.recv_exit_status` is called *after*
+ `.Channel.recv` (or, again, using threads) can avoid the hang.
+
+ :return: the exit code (as an `int`) of the process on the server.
+
+ .. versionadded:: 1.2
+ """
+ self.status_event.wait()
+ assert self.status_event.is_set()
+ return self.exit_status
+
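Following the warning above, one safe ordering (illustrative; chan is assumed to be a session channel on which exec_command() was already called): drain the output stream to EOF first, then block on the exit status.

stdout = chan.makefile("r")       # file-like wrapper over recv()
data = stdout.read()              # reading keeps the receive window open
status = chan.recv_exit_status()  # safe now: EOF has already been reached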
+ def send_exit_status(self, status):
+ """
+ Send the exit status of an executed command to the client. (This
+ really only makes sense in server mode.) Many clients expect to
+ get some sort of status code back from an executed command after
+ it completes.
+
+ :param int status: the exit code of the process
+
+ .. versionadded:: 1.2
+ """
+ # in many cases, the channel will not still be open here.
+ # that's fine.
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_REQUEST)
+ m.add_int(self.remote_chanid)
+ m.add_string("exit-status")
+ m.add_boolean(False)
+ m.add_int(status)
+ self.transport._send_user_message(m)
+
+ @open_only
+ def request_x11(
+ self,
+ screen_number=0,
+ auth_protocol=None,
+ auth_cookie=None,
+ single_connection=False,
+ handler=None,
+ ):
+ """
+ Request an x11 session on this channel. If the server allows it,
+ further x11 requests can be made from the server to the client,
+ when an x11 application is run in a shell session.
+
+ From :rfc:`4254`::
+
+ It is RECOMMENDED that the 'x11 authentication cookie' that is
+ sent be a fake, random cookie, and that the cookie be checked and
+ replaced by the real cookie when a connection request is received.
+
+ If you omit the auth_cookie, a new secure random 128-bit value will be
+ generated, used, and returned. You will need to use this value to
+ verify incoming x11 requests and replace them with the actual local
+ x11 cookie (which requires some knowledge of the x11 protocol).
+
+ If a handler is passed in, the handler is called from another thread
+ whenever a new x11 connection arrives. The default handler queues up
+ incoming x11 connections, which may be retrieved using
+ `.Transport.accept`. The handler's calling signature is::
+
+ handler(channel: Channel, (address: str, port: int))
+
+ :param int screen_number: the x11 screen number (0, 10, etc.)
+ :param str auth_protocol:
+ the name of the X11 authentication method used; if none is given,
+ ``"MIT-MAGIC-COOKIE-1"`` is used
+ :param str auth_cookie:
+ hexadecimal string containing the x11 auth cookie; if none is
+ given, a secure random 128-bit value is generated
+ :param bool single_connection:
+ if True, only a single x11 connection will be forwarded (by
+ default, any number of x11 connections can arrive over this
+ session)
+ :param handler:
+ an optional callable handler to use for incoming X11 connections
+ :return: the auth_cookie used
+ """
+ if auth_protocol is None:
+ auth_protocol = "MIT-MAGIC-COOKIE-1"
+ if auth_cookie is None:
+ auth_cookie = binascii.hexlify(os.urandom(16))
+
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_REQUEST)
+ m.add_int(self.remote_chanid)
+ m.add_string("x11-req")
+ m.add_boolean(True)
+ m.add_boolean(single_connection)
+ m.add_string(auth_protocol)
+ m.add_string(auth_cookie)
+ m.add_int(screen_number)
+ self._event_pending()
+ self.transport._send_user_message(m)
+ self._wait_for_event()
+ self.transport._set_x11_handler(handler)
+ return auth_cookie
+
+ @open_only
+ def request_forward_agent(self, handler):
+ """
+        Request agent forwarding on this channel.
+        This is only valid with an OpenSSH-style ssh-agent.
+
+ :param handler:
+ a required callable handler to use for incoming SSH Agent
+ connections
+
+        :return: ``True`` if the request was sent (at present this is always
+            the case)
+
+ :raises: SSHException in case of channel problem.
+ """
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_REQUEST)
+ m.add_int(self.remote_chanid)
+ m.add_string("auth-agent-req@openssh.com")
+ m.add_boolean(False)
+ self.transport._send_user_message(m)
+ self.transport._set_forward_agent_handler(handler)
+ return True
+
+ def get_transport(self):
+ """
+ Return the `.Transport` associated with this channel.
+ """
+ return self.transport
+
+ def set_name(self, name):
+ """
+ Set a name for this channel. Currently it's only used to set the name
+ of the channel in logfile entries. The name can be fetched with the
+ `get_name` method.
+
+ :param str name: new channel name
+ """
+ self._name = name
+
+ def get_name(self):
+ """
+ Get the name of this channel that was previously set by `set_name`.
+ """
+ return self._name
+
+ def get_id(self):
+ """
+ Return the `int` ID # for this channel.
+
+ The channel ID is unique across a `.Transport` and usually a small
+ number. It's also the number passed to
+ `.ServerInterface.check_channel_request` when determining whether to
+ accept a channel request in server mode.
+ """
+ return self.chanid
+
+ def set_combine_stderr(self, combine):
+ """
+ Set whether stderr should be combined into stdout on this channel.
+ The default is ``False``, but in some cases it may be convenient to
+ have both streams combined.
+
+ If this is ``False``, and `exec_command` is called (or ``invoke_shell``
+ with no pty), output to stderr will not show up through the `recv`
+ and `recv_ready` calls. You will have to use `recv_stderr` and
+ `recv_stderr_ready` to get stderr output.
+
+ If this is ``True``, data will never show up via `recv_stderr` or
+ `recv_stderr_ready`.
+
+ :param bool combine:
+ ``True`` if stderr output should be combined into stdout on this
+ channel.
+ :return: the previous setting (a `bool`).
+
+ .. versionadded:: 1.1
+ """
+ data = bytes()
+ self.lock.acquire()
+ try:
+ old = self.combine_stderr
+ self.combine_stderr = combine
+ if combine and not old:
+ # copy old stderr buffer into primary buffer
+ data = self.in_stderr_buffer.empty()
+ finally:
+ self.lock.release()
+ if len(data) > 0:
+ self._feed(data)
+ return old
+
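A small illustration of the combined mode described above (chan is an assumed session channel; the command is a placeholder): with stderr folded in, a single stream carries both kinds of output.

chan.set_combine_stderr(True)            # fold stderr into the stdout stream
chan.exec_command("make")                # placeholder command
combined = chan.makefile("r").read()     # stdout and stderr interleaved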
+ # ...socket API...
+
+ def settimeout(self, timeout):
+ """
+ Set a timeout on blocking read/write operations. The ``timeout``
+ argument can be a nonnegative float expressing seconds, or ``None``.
+ If a float is given, subsequent channel read/write operations will
+ raise a timeout exception if the timeout period value has elapsed
+ before the operation has completed. Setting a timeout of ``None``
+ disables timeouts on socket operations.
+
+ ``chan.settimeout(0.0)`` is equivalent to ``chan.setblocking(0)``;
+ ``chan.settimeout(None)`` is equivalent to ``chan.setblocking(1)``.
+
+ :param float timeout:
+ seconds to wait for a pending read/write operation before raising
+ ``socket.timeout``, or ``None`` for no timeout.
+ """
+ self.timeout = timeout
+
+ def gettimeout(self):
+ """
+ Returns the timeout in seconds (as a float) associated with socket
+ operations, or ``None`` if no timeout is set. This reflects the last
+ call to `setblocking` or `settimeout`.
+ """
+ return self.timeout
+
+ def setblocking(self, blocking):
+ """
+ Set blocking or non-blocking mode of the channel: if ``blocking`` is 0,
+ the channel is set to non-blocking mode; otherwise it's set to blocking
+ mode. Initially all channels are in blocking mode.
+
+ In non-blocking mode, if a `recv` call doesn't find any data, or if a
+ `send` call can't immediately dispose of the data, an error exception
+ is raised. In blocking mode, the calls block until they can proceed. An
+ EOF condition is considered "immediate data" for `recv`, so if the
+ channel is closed in the read direction, it will never block.
+
+ ``chan.setblocking(0)`` is equivalent to ``chan.settimeout(0)``;
+ ``chan.setblocking(1)`` is equivalent to ``chan.settimeout(None)``.
+
+ :param int blocking:
+ 0 to set non-blocking mode; non-0 to set blocking mode.
+ """
+ if blocking:
+ self.settimeout(None)
+ else:
+ self.settimeout(0.0)
+
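Putting the timeout semantics above together (illustrative; chan is an assumed open channel): a float arms socket.timeout on blocking calls, and setblocking(0) is the same as a zero timeout.

import socket

chan.settimeout(5.0)              # blocking recv()/send() now time out after 5s
try:
    data = chan.recv(1024)
except socket.timeout:
    data = b""                    # nothing arrived in time

chan.setblocking(0)               # equivalent to chan.settimeout(0.0)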
+ def getpeername(self):
+ """
+ Return the address of the remote side of this Channel, if possible.
+
+ This simply wraps `.Transport.getpeername`, used to provide enough of a
+ socket-like interface to allow asyncore to work. (asyncore likes to
+ call ``'getpeername'``.)
+ """
+ return self.transport.getpeername()
+
+ def close(self):
+ """
+ Close the channel. All future read/write operations on the channel
+ will fail. The remote end will receive no more data (after queued data
+ is flushed). Channels are automatically closed when their `.Transport`
+ is closed or when they are garbage collected.
+ """
+ self.lock.acquire()
+ try:
+ # only close the pipe when the user explicitly closes the channel.
+ # otherwise they will get unpleasant surprises. (and do it before
+ # checking self.closed, since the remote host may have already
+ # closed the connection.)
+ if self._pipe is not None:
+ self._pipe.close()
+ self._pipe = None
+
+ if not self.active or self.closed:
+ return
+ msgs = self._close_internal()
+ finally:
+ self.lock.release()
+ for m in msgs:
+ if m is not None:
+ self.transport._send_user_message(m)
+
+ def recv_ready(self):
+ """
+ Returns true if data is buffered and ready to be read from this
+ channel. A ``False`` result does not mean that the channel has closed;
+ it means you may need to wait before more data arrives.
+
+ :return:
+ ``True`` if a `recv` call on this channel would immediately return
+ at least one byte; ``False`` otherwise.
+ """
+ return self.in_buffer.read_ready()
+
+ def recv(self, nbytes):
+ """
+ Receive data from the channel. The return value is a string
+ representing the data received. The maximum amount of data to be
+ received at once is specified by ``nbytes``. If a string of
+ length zero is returned, the channel stream has closed.
+
+ :param int nbytes: maximum number of bytes to read.
+ :return: received data, as a `bytes`.
+
+ :raises socket.timeout:
+ if no data is ready before the timeout set by `settimeout`.
+ """
+ try:
+ out = self.in_buffer.read(nbytes, self.timeout)
+ except PipeTimeout:
+ raise socket.timeout()
+
+ ack = self._check_add_window(len(out))
+ # no need to hold the channel lock when sending this
+ if ack > 0:
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_WINDOW_ADJUST)
+ m.add_int(self.remote_chanid)
+ m.add_int(ack)
+ self.transport._send_user_message(m)
+
+ return out
+
+ def recv_stderr_ready(self):
+ """
+ Returns true if data is buffered and ready to be read from this
+ channel's stderr stream. Only channels using `exec_command` or
+ `invoke_shell` without a pty will ever have data on the stderr
+ stream.
+
+ :return:
+ ``True`` if a `recv_stderr` call on this channel would immediately
+ return at least one byte; ``False`` otherwise.
+
+ .. versionadded:: 1.1
+ """
+ return self.in_stderr_buffer.read_ready()
+
+ def recv_stderr(self, nbytes):
+ """
+ Receive data from the channel's stderr stream. Only channels using
+ `exec_command` or `invoke_shell` without a pty will ever have data
+ on the stderr stream. The return value is a string representing the
+ data received. The maximum amount of data to be received at once is
+ specified by ``nbytes``. If a string of length zero is returned, the
+ channel stream has closed.
+
+ :param int nbytes: maximum number of bytes to read.
+ :return: received data as a `bytes`
+
+ :raises socket.timeout: if no data is ready before the timeout set by
+ `settimeout`.
+
+ .. versionadded:: 1.1
+ """
+ try:
+ out = self.in_stderr_buffer.read(nbytes, self.timeout)
+ except PipeTimeout:
+ raise socket.timeout()
+
+ ack = self._check_add_window(len(out))
+ # no need to hold the channel lock when sending this
+ if ack > 0:
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_WINDOW_ADJUST)
+ m.add_int(self.remote_chanid)
+ m.add_int(ack)
+ self.transport._send_user_message(m)
+
+ return out
+
+ def send_ready(self):
+ """
+ Returns true if data can be written to this channel without blocking.
+ This means the channel is either closed (so any write attempt would
+ return immediately) or there is at least one byte of space in the
+ outbound buffer. If there is at least one byte of space in the
+ outbound buffer, a `send` call will succeed immediately and return
+ the number of bytes actually written.
+
+ :return:
+ ``True`` if a `send` call on this channel would immediately succeed
+ or fail
+ """
+ self.lock.acquire()
+ try:
+ if self.closed or self.eof_sent:
+ return True
+ return self.out_window_size > 0
+ finally:
+ self.lock.release()
+
+ def send(self, s):
+ """
+ Send data to the channel. Returns the number of bytes sent, or 0 if
+ the channel stream is closed. Applications are responsible for
+ checking that all data has been sent: if only some of the data was
+ transmitted, the application needs to attempt delivery of the remaining
+ data.
+
+ :param bytes s: data to send
+ :return: number of bytes actually sent, as an `int`
+
+ :raises socket.timeout: if no data could be sent before the timeout set
+ by `settimeout`.
+ """
+
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_DATA)
+ m.add_int(self.remote_chanid)
+ return self._send(s, m)
+
+ def send_stderr(self, s):
+ """
+ Send data to the channel on the "stderr" stream. This is normally
+ only used by servers to send output from shell commands -- clients
+ won't use this. Returns the number of bytes sent, or 0 if the channel
+ stream is closed. Applications are responsible for checking that all
+ data has been sent: if only some of the data was transmitted, the
+ application needs to attempt delivery of the remaining data.
+
+ :param bytes s: data to send.
+ :return: number of bytes actually sent, as an `int`.
+
+ :raises socket.timeout:
+ if no data could be sent before the timeout set by `settimeout`.
+
+ .. versionadded:: 1.1
+ """
+
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_EXTENDED_DATA)
+ m.add_int(self.remote_chanid)
+ m.add_int(1)
+ return self._send(s, m)
+
+ def sendall(self, s):
+ """
+ Send data to the channel, without allowing partial results. Unlike
+ `send`, this method continues to send data from the given string until
+ either all data has been sent or an error occurs. Nothing is returned.
+
+ :param bytes s: data to send.
+
+ :raises socket.timeout:
+ if sending stalled for longer than the timeout set by `settimeout`.
+ :raises socket.error:
+ if an error occurred before the entire string was sent.
+
+ .. note::
+ If the channel is closed while only part of the data has been
+ sent, there is no way to determine how much data (if any) was sent.
+ This is irritating, but identically follows Python's API.
+ """
+ while s:
+ sent = self.send(s)
+ s = s[sent:]
+ return None
+
+ def sendall_stderr(self, s):
+ """
+ Send data to the channel's "stderr" stream, without allowing partial
+ results. Unlike `send_stderr`, this method continues to send data
+ from the given bytestring until all data has been sent or an error
+ occurs. Nothing is returned.
+
+ :param bytes s: data to send to the client as "stderr" output.
+
+ :raises socket.timeout:
+ if sending stalled for longer than the timeout set by `settimeout`.
+ :raises socket.error:
+ if an error occurred before the entire string was sent.
+
+ .. versionadded:: 1.1
+ """
+ while s:
+ sent = self.send_stderr(s)
+ s = s[sent:]
+ return None
+
+ def makefile(self, *params):
+ """
+ Return a file-like object associated with this channel. The optional
+ ``mode`` and ``bufsize`` arguments are interpreted the same way as by
+        the built-in ``open()`` function in Python.
+
+ :return: `.ChannelFile` object which can be used for Python file I/O.
+ """
+ return ChannelFile(*([self] + list(params)))
+
+ def makefile_stderr(self, *params):
+ """
+ Return a file-like object associated with this channel's stderr
+ stream. Only channels using `exec_command` or `invoke_shell`
+ without a pty will ever have data on the stderr stream.
+
+ The optional ``mode`` and ``bufsize`` arguments are interpreted the
+        same way as by the built-in ``open()`` function in Python. For a
+ client, it only makes sense to open this file for reading. For a
+ server, it only makes sense to open this file for writing.
+
+ :returns:
+ `.ChannelStderrFile` object which can be used for Python file I/O.
+
+ .. versionadded:: 1.1
+ """
+ return ChannelStderrFile(*([self] + list(params)))
+
+ def makefile_stdin(self, *params):
+ """
+ Return a file-like object associated with this channel's stdin
+ stream.
+
+ The optional ``mode`` and ``bufsize`` arguments are interpreted the
+        same way as by the built-in ``open()`` function in Python. For a
+ client, it only makes sense to open this file for writing. For a
+ server, it only makes sense to open this file for reading.
+
+ :returns:
+ `.ChannelStdinFile` object which can be used for Python file I/O.
+
+ .. versionadded:: 2.6
+ """
+ return ChannelStdinFile(*([self] + list(params)))
+
+ def fileno(self):
+ """
+        Returns an OS-level file descriptor which can be used for polling,
+ but not for reading or writing. This is primarily to allow Python's
+ ``select`` module to work.
+
+ The first time ``fileno`` is called on a channel, a pipe is created to
+ simulate real OS-level file descriptor (FD) behavior. Because of this,
+ two OS-level FDs are created, which will use up FDs faster than normal.
+ (You won't notice this effect unless you have hundreds of channels
+ open at the same time.)
+
+ :return: an OS-level file descriptor (`int`)
+
+ .. warning::
+ This method causes channel reads to be slightly less efficient.
+ """
+ self.lock.acquire()
+ try:
+ if self._pipe is not None:
+ return self._pipe.fileno()
+ # create the pipe and feed in any existing data
+ self._pipe = pipe.make_pipe()
+ p1, p2 = pipe.make_or_pipe(self._pipe)
+ self.in_buffer.set_event(p1)
+ self.in_stderr_buffer.set_event(p2)
+ return self._pipe.fileno()
+ finally:
+ self.lock.release()
+
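Because fileno() hands back a real OS-level descriptor, a channel can sit directly in a select() readability set next to ordinary sockets (illustrative; chan is an assumed open channel).

import select

readable, _, _ = select.select([chan], [], [], 10.0)
if chan in readable:
    data = chan.recv(4096)        # recv_ready() is True, so this won't block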
+ def shutdown(self, how):
+ """
+ Shut down one or both halves of the connection. If ``how`` is 0,
+ further receives are disallowed. If ``how`` is 1, further sends
+ are disallowed. If ``how`` is 2, further sends and receives are
+ disallowed. This closes the stream in one or both directions.
+
+ :param int how:
+ 0 (stop receiving), 1 (stop sending), or 2 (stop receiving and
+ sending).
+ """
+ if (how == 0) or (how == 2):
+ # feign "read" shutdown
+ self.eof_received = 1
+ if (how == 1) or (how == 2):
+ self.lock.acquire()
+ try:
+ m = self._send_eof()
+ finally:
+ self.lock.release()
+ if m is not None:
+ self.transport._send_user_message(m)
+
+ def shutdown_read(self):
+ """
+ Shutdown the receiving side of this socket, closing the stream in
+ the incoming direction. After this call, future reads on this
+ channel will fail instantly. This is a convenience method, equivalent
+ to ``shutdown(0)``, for people who don't make it a habit to
+ memorize unix constants from the 1970s.
+
+ .. versionadded:: 1.2
+ """
+ self.shutdown(0)
+
+ def shutdown_write(self):
+ """
+ Shutdown the sending side of this socket, closing the stream in
+ the outgoing direction. After this call, future writes on this
+ channel will fail instantly. This is a convenience method, equivalent
+ to ``shutdown(1)``, for people who don't make it a habit to
+ memorize unix constants from the 1970s.
+
+ .. versionadded:: 1.2
+ """
+ self.shutdown(1)
+
+ @property
+ def _closed(self):
+ # Concession to Python 3's socket API, which has a private ._closed
+ # attribute instead of a semipublic .closed attribute.
+ return self.closed
+
+ # ...calls from Transport
+
+ def _set_transport(self, transport):
+ self.transport = transport
+ self.logger = util.get_logger(self.transport.get_log_channel())
+
+ def _set_window(self, window_size, max_packet_size):
+ self.in_window_size = window_size
+ self.in_max_packet_size = max_packet_size
+ # threshold of bytes we receive before we bother to send
+ # a window update
+ self.in_window_threshold = window_size // 10
+ self.in_window_sofar = 0
+ self._log(DEBUG, "Max packet in: {} bytes".format(max_packet_size))
+
+ def _set_remote_channel(self, chanid, window_size, max_packet_size):
+ self.remote_chanid = chanid
+ self.out_window_size = window_size
+ self.out_max_packet_size = self.transport._sanitize_packet_size(
+ max_packet_size
+ )
+ self.active = 1
+ self._log(
+ DEBUG, "Max packet out: {} bytes".format(self.out_max_packet_size)
+ )
+
+ def _request_success(self, m):
+ self._log(DEBUG, "Sesch channel {} request ok".format(self.chanid))
+ self.event_ready = True
+ self.event.set()
+ return
+
+ def _request_failed(self, m):
+ self.lock.acquire()
+ try:
+ msgs = self._close_internal()
+ finally:
+ self.lock.release()
+ for m in msgs:
+ if m is not None:
+ self.transport._send_user_message(m)
+
+ def _feed(self, m):
+ if isinstance(m, bytes):
+ # passed from _feed_extended
+ s = m
+ else:
+ s = m.get_binary()
+ self.in_buffer.feed(s)
+
+ def _feed_extended(self, m):
+ code = m.get_int()
+ s = m.get_binary()
+ if code != 1:
+ self._log(
+ ERROR, "unknown extended_data type {}; discarding".format(code)
+ )
+ return
+ if self.combine_stderr:
+ self._feed(s)
+ else:
+ self.in_stderr_buffer.feed(s)
+
+ def _window_adjust(self, m):
+ nbytes = m.get_int()
+ self.lock.acquire()
+ try:
+ if self.ultra_debug:
+ self._log(DEBUG, "window up {}".format(nbytes))
+ self.out_window_size += nbytes
+ self.out_buffer_cv.notify_all()
+ finally:
+ self.lock.release()
+
+ def _handle_request(self, m):
+ key = m.get_text()
+ want_reply = m.get_boolean()
+ server = self.transport.server_object
+ ok = False
+ if key == "exit-status":
+ self.exit_status = m.get_int()
+ self.status_event.set()
+ ok = True
+ elif key == "xon-xoff":
+ # ignore
+ ok = True
+ elif key == "pty-req":
+ term = m.get_string()
+ width = m.get_int()
+ height = m.get_int()
+ pixelwidth = m.get_int()
+ pixelheight = m.get_int()
+ modes = m.get_string()
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_pty_request(
+ self, term, width, height, pixelwidth, pixelheight, modes
+ )
+ elif key == "shell":
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_shell_request(self)
+ elif key == "env":
+ name = m.get_string()
+ value = m.get_string()
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_env_request(self, name, value)
+ elif key == "exec":
+ cmd = m.get_string()
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_exec_request(self, cmd)
+ elif key == "subsystem":
+ name = m.get_text()
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_subsystem_request(self, name)
+ elif key == "window-change":
+ width = m.get_int()
+ height = m.get_int()
+ pixelwidth = m.get_int()
+ pixelheight = m.get_int()
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_window_change_request(
+ self, width, height, pixelwidth, pixelheight
+ )
+ elif key == "x11-req":
+ single_connection = m.get_boolean()
+ auth_proto = m.get_text()
+ auth_cookie = m.get_binary()
+ screen_number = m.get_int()
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_x11_request(
+ self,
+ single_connection,
+ auth_proto,
+ auth_cookie,
+ screen_number,
+ )
+ elif key == "auth-agent-req@openssh.com":
+ if server is None:
+ ok = False
+ else:
+ ok = server.check_channel_forward_agent_request(self)
+ else:
+ self._log(DEBUG, 'Unhandled channel request "{}"'.format(key))
+ ok = False
+ if want_reply:
+ m = Message()
+ if ok:
+ m.add_byte(cMSG_CHANNEL_SUCCESS)
+ else:
+ m.add_byte(cMSG_CHANNEL_FAILURE)
+ m.add_int(self.remote_chanid)
+ self.transport._send_user_message(m)
+
+ def _handle_eof(self, m):
+ self.lock.acquire()
+ try:
+ if not self.eof_received:
+ self.eof_received = True
+ self.in_buffer.close()
+ self.in_stderr_buffer.close()
+ if self._pipe is not None:
+ self._pipe.set_forever()
+ finally:
+ self.lock.release()
+ self._log(DEBUG, "EOF received ({})".format(self._name))
+
+ def _handle_close(self, m):
+ self.lock.acquire()
+ try:
+ msgs = self._close_internal()
+ self.transport._unlink_channel(self.chanid)
+ finally:
+ self.lock.release()
+ for m in msgs:
+ if m is not None:
+ self.transport._send_user_message(m)
+
+ # ...internals...
+
+ def _send(self, s, m):
+ size = len(s)
+ self.lock.acquire()
+ try:
+ if self.closed:
+ # this doesn't seem useful, but it is the documented behavior
+ # of Socket
+ raise socket.error("Socket is closed")
+ size = self._wait_for_send_window(size)
+ if size == 0:
+ # eof or similar
+ return 0
+ m.add_string(s[:size])
+ finally:
+ self.lock.release()
+ # Note: We release self.lock before calling _send_user_message.
+ # Otherwise, we can deadlock during re-keying.
+ self.transport._send_user_message(m)
+ return size
+
+ def _log(self, level, msg, *args):
+ self.logger.log(level, "[chan " + self._name + "] " + msg, *args)
+
+ def _event_pending(self):
+ self.event.clear()
+ self.event_ready = False
+
+ def _wait_for_event(self):
+ self.event.wait()
+ assert self.event.is_set()
+ if self.event_ready:
+ return
+ e = self.transport.get_exception()
+ if e is None:
+ e = SSHException("Channel closed.")
+ raise e
+
+ def _set_closed(self):
+ # you are holding the lock.
+ self.closed = True
+ self.in_buffer.close()
+ self.in_stderr_buffer.close()
+ self.out_buffer_cv.notify_all()
+ # Notify any waiters that we are closed
+ self.event.set()
+ self.status_event.set()
+ if self._pipe is not None:
+ self._pipe.set_forever()
+
+ def _send_eof(self):
+ # you are holding the lock.
+ if self.eof_sent:
+ return None
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_EOF)
+ m.add_int(self.remote_chanid)
+ self.eof_sent = True
+ self._log(DEBUG, "EOF sent ({})".format(self._name))
+ return m
+
+ def _close_internal(self):
+ # you are holding the lock.
+ if not self.active or self.closed:
+ return None, None
+ m1 = self._send_eof()
+ m2 = Message()
+ m2.add_byte(cMSG_CHANNEL_CLOSE)
+ m2.add_int(self.remote_chanid)
+ self._set_closed()
+ # can't unlink from the Transport yet -- the remote side may still
+ # try to send meta-data (exit-status, etc)
+ return m1, m2
+
+ def _unlink(self):
+ # server connection could die before we become active:
+ # still signal the close!
+ if self.closed:
+ return
+ self.lock.acquire()
+ try:
+ self._set_closed()
+ self.transport._unlink_channel(self.chanid)
+ finally:
+ self.lock.release()
+
+ def _check_add_window(self, n):
+ self.lock.acquire()
+ try:
+ if self.closed or self.eof_received or not self.active:
+ return 0
+ if self.ultra_debug:
+ self._log(DEBUG, "addwindow {}".format(n))
+ self.in_window_sofar += n
+ if self.in_window_sofar <= self.in_window_threshold:
+ return 0
+ if self.ultra_debug:
+ self._log(
+ DEBUG, "addwindow send {}".format(self.in_window_sofar)
+ )
+ out = self.in_window_sofar
+ self.in_window_sofar = 0
+ return out
+ finally:
+ self.lock.release()
+
+ def _wait_for_send_window(self, size):
+ """
+ (You are already holding the lock.)
+ Wait for the send window to open up, and allocate up to ``size`` bytes
+ for transmission. If no space opens up before the timeout, a timeout
+ exception is raised. Returns the number of bytes available to send
+ (may be less than requested).
+ """
+ # you are already holding the lock
+ if self.closed or self.eof_sent:
+ return 0
+ if self.out_window_size == 0:
+ # should we block?
+ if self.timeout == 0.0:
+ raise socket.timeout()
+ # loop here in case we get woken up but a different thread has
+ # filled the buffer
+ timeout = self.timeout
+ while self.out_window_size == 0:
+ if self.closed or self.eof_sent:
+ return 0
+ then = time.time()
+ self.out_buffer_cv.wait(timeout)
+ if timeout is not None:
+ timeout -= time.time() - then
+ if timeout <= 0.0:
+ raise socket.timeout()
+ # we have some window to squeeze into
+ if self.closed or self.eof_sent:
+ return 0
+ if self.out_window_size < size:
+ size = self.out_window_size
+ if self.out_max_packet_size - 64 < size:
+ size = self.out_max_packet_size - 64
+ self.out_window_size -= size
+ if self.ultra_debug:
+ self._log(DEBUG, "window down to {}".format(self.out_window_size))
+ return size
+
+
+class ChannelFile(BufferedFile):
+ """
+ A file-like wrapper around `.Channel`. A ChannelFile is created by calling
+ `Channel.makefile`.
+
+ .. warning::
+ To correctly emulate the file object created from a socket's `makefile
+ <python:socket.socket.makefile>` method, a `.Channel` and its
+ `.ChannelFile` should be able to be closed or garbage-collected
+ independently. Currently, closing the `ChannelFile` does nothing but
+ flush the buffer.
+ """
+
+ def __init__(self, channel, mode="r", bufsize=-1):
+ self.channel = channel
+ BufferedFile.__init__(self)
+ self._set_mode(mode, bufsize)
+
+ def __repr__(self):
+ """
+ Returns a string representation of this object, for debugging.
+ """
+ return "<paramiko.ChannelFile from " + repr(self.channel) + ">"
+
+ def _read(self, size):
+ return self.channel.recv(size)
+
+ def _write(self, data):
+ self.channel.sendall(data)
+ return len(data)
+
+
+class ChannelStderrFile(ChannelFile):
+ """
+ A file-like wrapper around `.Channel` stderr.
+
+ See `Channel.makefile_stderr` for details.
+ """
+
+ def _read(self, size):
+ return self.channel.recv_stderr(size)
+
+ def _write(self, data):
+ self.channel.sendall_stderr(data)
+ return len(data)
+
+
+class ChannelStdinFile(ChannelFile):
+ """
+ A file-like wrapper around `.Channel` stdin.
+
+ See `Channel.makefile_stdin` for details.
+ """
+
+ def close(self):
+ super().close()
+ self.channel.shutdown_write()
diff --git a/paramiko/client.py b/paramiko/client.py
new file mode 100644
index 0000000..d8be910
--- /dev/null
+++ b/paramiko/client.py
@@ -0,0 +1,893 @@
+# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+SSH client & key policies
+"""
+
+from binascii import hexlify
+import getpass
+import inspect
+import os
+import socket
+import warnings
+from errno import ECONNREFUSED, EHOSTUNREACH
+
+from paramiko.agent import Agent
+from paramiko.common import DEBUG
+from paramiko.config import SSH_PORT
+from paramiko.dsskey import DSSKey
+from paramiko.ecdsakey import ECDSAKey
+from paramiko.ed25519key import Ed25519Key
+from paramiko.hostkeys import HostKeys
+from paramiko.rsakey import RSAKey
+from paramiko.ssh_exception import (
+ SSHException,
+ BadHostKeyException,
+ NoValidConnectionsError,
+)
+from paramiko.transport import Transport
+from paramiko.util import ClosingContextManager
+
+
+class SSHClient(ClosingContextManager):
+ """
+ A high-level representation of a session with an SSH server. This class
+ wraps `.Transport`, `.Channel`, and `.SFTPClient` to take care of most
+ aspects of authenticating and opening channels. A typical use case is::
+
+ client = SSHClient()
+ client.load_system_host_keys()
+ client.connect('ssh.example.com')
+ stdin, stdout, stderr = client.exec_command('ls -l')
+
+ You may pass in explicit overrides for authentication and server host key
+ checking. The default mechanism is to try to use local key files or an
+ SSH agent (if one is running).
+
+ Instances of this class may be used as context managers.
+
+ .. versionadded:: 1.6
+ """
+
+ def __init__(self):
+ """
+ Create a new SSHClient.
+ """
+ self._system_host_keys = HostKeys()
+ self._host_keys = HostKeys()
+ self._host_keys_filename = None
+ self._log_channel = None
+ self._policy = RejectPolicy()
+ self._transport = None
+ self._agent = None
+
+ def load_system_host_keys(self, filename=None):
+ """
+ Load host keys from a system (read-only) file. Host keys read with
+ this method will not be saved back by `save_host_keys`.
+
+ This method can be called multiple times. Each new set of host keys
+ will be merged with the existing set (new replacing old if there are
+ conflicts).
+
+ If ``filename`` is left as ``None``, an attempt will be made to read
+ keys from the user's local "known hosts" file, as used by OpenSSH,
+ and no exception will be raised if the file can't be read. This is
+ probably only useful on posix.
+
+ :param str filename: the filename to read, or ``None``
+
+ :raises: ``IOError`` --
+ if a filename was provided and the file could not be read
+ """
+ if filename is None:
+ # try the user's .ssh key file, and mask exceptions
+ filename = os.path.expanduser("~/.ssh/known_hosts")
+ try:
+ self._system_host_keys.load(filename)
+ except IOError:
+ pass
+ return
+ self._system_host_keys.load(filename)
+
+ def load_host_keys(self, filename):
+ """
+ Load host keys from a local host-key file. Host keys read with this
+ method will be checked after keys loaded via `load_system_host_keys`,
+ but will be saved back by `save_host_keys` (so they can be modified).
+ The missing host key policy `.AutoAddPolicy` adds keys to this set and
+ saves them, when connecting to a previously-unknown server.
+
+ This method can be called multiple times. Each new set of host keys
+ will be merged with the existing set (new replacing old if there are
+ conflicts). When automatically saving, the last hostname is used.
+
+ :param str filename: the filename to read
+
+ :raises: ``IOError`` -- if the filename could not be read
+ """
+ self._host_keys_filename = filename
+ self._host_keys.load(filename)
+
+ def save_host_keys(self, filename):
+ """
+ Save the host keys back to a file. Only the host keys loaded with
+ `load_host_keys` (plus any added directly) will be saved -- not any
+ host keys loaded with `load_system_host_keys`.
+
+ :param str filename: the filename to save to
+
+ :raises: ``IOError`` -- if the file could not be written
+ """
+
+ # update local host keys from file (in case other SSH clients
+        # have written to the known_hosts file meanwhile).
+ if self._host_keys_filename is not None:
+ self.load_host_keys(self._host_keys_filename)
+
+ with open(filename, "w") as f:
+ for hostname, keys in self._host_keys.items():
+ for keytype, key in keys.items():
+ f.write(
+ "{} {} {}\n".format(
+ hostname, keytype, key.get_base64()
+ )
+ )
+
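Typical bookkeeping around the three host-key methods above (illustrative, not part of the patch; the writable path is an example, while ~/.ssh/known_hosts is the usual read-only OpenSSH location).

import os
import paramiko

client = paramiko.SSHClient()
client.load_system_host_keys()                    # read-only; never written back
local_path = os.path.expanduser("~/.ssh/paramiko_known_hosts")  # example path
if os.path.exists(local_path):
    client.load_host_keys(local_path)             # writable, app-specific set
# ... connect(), possibly learning new keys via the missing-host-key policy ...
client.save_host_keys(local_path)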
+ def get_host_keys(self):
+ """
+ Get the local `.HostKeys` object. This can be used to examine the
+ local host keys or change them.
+
+ :return: the local host keys as a `.HostKeys` object.
+ """
+ return self._host_keys
+
+ def set_log_channel(self, name):
+ """
+ Set the channel for logging. The default is ``"paramiko.transport"``
+ but it can be set to anything you want.
+
+ :param str name: new channel name for logging
+ """
+ self._log_channel = name
+
+ def set_missing_host_key_policy(self, policy):
+ """
+ Set policy to use when connecting to servers without a known host key.
+
+ Specifically:
+
+ * A **policy** is a "policy class" (or instance thereof), namely some
+ subclass of `.MissingHostKeyPolicy` such as `.RejectPolicy` (the
+ default), `.AutoAddPolicy`, `.WarningPolicy`, or a user-created
+ subclass.
+ * A host key is **known** when it appears in the client object's cached
+ host keys structures (those manipulated by `load_system_host_keys`
+ and/or `load_host_keys`).
+
+ :param .MissingHostKeyPolicy policy:
+ the policy to use when receiving a host key from a
+ previously-unknown server
+ """
+ if inspect.isclass(policy):
+ policy = policy()
+ self._policy = policy
+
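Given the isclass() check above, the policy may be handed over either as a class or as an instance (illustrative):

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy)     # class form
client.set_missing_host_key_policy(paramiko.WarningPolicy())   # instance form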
+ def _families_and_addresses(self, hostname, port):
+ """
+ Yield pairs of address families and addresses to try for connecting.
+
+ :param str hostname: the server to connect to
+ :param int port: the server port to connect to
+        :returns: Yields ``(family, address)`` tuples
+ """
+ guess = True
+ addrinfos = socket.getaddrinfo(
+ hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM
+ )
+ for (family, socktype, proto, canonname, sockaddr) in addrinfos:
+ if socktype == socket.SOCK_STREAM:
+ yield family, sockaddr
+ guess = False
+
+        # some OSes, like AIX, don't indicate SOCK_STREAM support, so just
+ # guess. :( We only do this if we did not get a single result marked
+ # as socktype == SOCK_STREAM.
+ if guess:
+ for family, _, _, _, sockaddr in addrinfos:
+ yield family, sockaddr
+
+ def connect(
+ self,
+ hostname,
+ port=SSH_PORT,
+ username=None,
+ password=None,
+ pkey=None,
+ key_filename=None,
+ timeout=None,
+ allow_agent=True,
+ look_for_keys=True,
+ compress=False,
+ sock=None,
+ gss_auth=False,
+ gss_kex=False,
+ gss_deleg_creds=True,
+ gss_host=None,
+ banner_timeout=None,
+ auth_timeout=None,
+ channel_timeout=None,
+ gss_trust_dns=True,
+ passphrase=None,
+ disabled_algorithms=None,
+ transport_factory=None,
+ auth_strategy=None,
+ ):
+ """
+ Connect to an SSH server and authenticate to it. The server's host key
+ is checked against the system host keys (see `load_system_host_keys`)
+ and any local host keys (`load_host_keys`). If the server's hostname
+ is not found in either set of host keys, the missing host key policy
+ is used (see `set_missing_host_key_policy`). The default policy is
+ to reject the key and raise an `.SSHException`.
+
+ Authentication is attempted in the following order of priority:
+
+ - The ``pkey`` or ``key_filename`` passed in (if any)
+
+ - ``key_filename`` may contain OpenSSH public certificate paths
+ as well as regular private-key paths; when files ending in
+ ``-cert.pub`` are found, they are assumed to match a private
+ key, and both components will be loaded. (The private key
+ itself does *not* need to be listed in ``key_filename`` for
+ this to occur - *just* the certificate.)
+
+ - Any key we can find through an SSH agent
+ - Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in
+ ``~/.ssh/``
+
+ - When OpenSSH-style public certificates exist that match an
+ existing such private key (so e.g. one has ``id_rsa`` and
+ ``id_rsa-cert.pub``) the certificate will be loaded alongside
+ the private key and used for authentication.
+
+ - Plain username/password auth, if a password was given
+
+ If a private key requires a password to unlock it, and a password is
+ passed in, that password will be used to attempt to unlock the key.
+
+ :param str hostname: the server to connect to
+ :param int port: the server port to connect to
+ :param str username:
+ the username to authenticate as (defaults to the current local
+ username)
+ :param str password:
+ Used for password authentication; is also used for private key
+ decryption if ``passphrase`` is not given.
+ :param str passphrase:
+ Used for decrypting private keys.
+ :param .PKey pkey: an optional private key to use for authentication
+ :param str key_filename:
+ the filename, or list of filenames, of optional private key(s)
+ and/or certs to try for authentication
+ :param float timeout:
+ an optional timeout (in seconds) for the TCP connect
+ :param bool allow_agent:
+ set to False to disable connecting to the SSH agent
+ :param bool look_for_keys:
+ set to False to disable searching for discoverable private key
+ files in ``~/.ssh/``
+ :param bool compress: set to True to turn on compression
+ :param socket sock:
+ an open socket or socket-like object (such as a `.Channel`) to use
+ for communication to the target host
+ :param bool gss_auth:
+ ``True`` if you want to use GSS-API authentication
+ :param bool gss_kex:
+ Perform GSS-API Key Exchange and user authentication
+ :param bool gss_deleg_creds: Delegate GSS-API client credentials or not
+ :param str gss_host:
+ The target's name in the Kerberos database. Defaults to ``hostname``.
+ :param bool gss_trust_dns:
+ Indicates whether or not the DNS is trusted to securely
+ canonicalize the name of the host being connected to (default
+ ``True``).
+ :param float banner_timeout: an optional timeout (in seconds) to wait
+ for the SSH banner to be presented.
+ :param float auth_timeout: an optional timeout (in seconds) to wait for
+ an authentication response.
+ :param float channel_timeout: an optional timeout (in seconds) to wait
+ for a channel open response.
+ :param dict disabled_algorithms:
+ an optional dict passed directly to `.Transport` and its keyword
+ argument of the same name.
+ :param transport_factory:
+ an optional callable which is handed a subset of the constructor
+ arguments (primarily those related to the socket, GSS
+ functionality, and algorithm selection) and generates a
+ `.Transport` instance to be used by this client. Defaults to
+ `.Transport.__init__`.
+ :param auth_strategy:
+ an optional instance of `.AuthStrategy`, triggering use of this
+ newer authentication mechanism instead of SSHClient's legacy auth
+ method.
+
+ .. warning::
+ This parameter is **incompatible** with all other
+ authentication-related parameters (such as, but not limited to,
+ ``password``, ``key_filename`` and ``allow_agent``) and will
+ trigger an exception if given alongside them.
+
+ :returns:
+ `.AuthResult` if ``auth_strategy`` is non-``None``; otherwise,
+ returns ``None``.
+
+ :raises BadHostKeyException:
+ if the server's host key could not be verified.
+ :raises AuthenticationException:
+ if authentication failed.
+ :raises UnableToAuthenticate:
+ if authentication failed (when ``auth_strategy`` is non-``None``;
+ note that this is a subclass of ``AuthenticationException``).
+ :raises socket.error:
+ if a socket error (other than connection-refused or
+ host-unreachable) occurred while connecting.
+ :raises NoValidConnectionsError:
+ if all valid connection targets for the requested hostname (e.g. IPv4
+ and IPv6) yielded connection-refused or host-unreachable socket
+ errors.
+ :raises SSHException:
+ if there was any other error connecting or establishing an SSH
+ session.
+
+ .. versionchanged:: 1.15
+ Added the ``banner_timeout``, ``gss_auth``, ``gss_kex``,
+ ``gss_deleg_creds`` and ``gss_host`` arguments.
+ .. versionchanged:: 2.3
+ Added the ``gss_trust_dns`` argument.
+ .. versionchanged:: 2.4
+ Added the ``passphrase`` argument.
+ .. versionchanged:: 2.6
+ Added the ``disabled_algorithms`` argument.
+ .. versionchanged:: 2.12
+ Added the ``transport_factory`` argument.
+ .. versionchanged:: 3.2
+ Added the ``auth_strategy`` argument.
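+
+ A minimal usage sketch (the hostname and credentials are purely
+ illustrative)::
+
+ client = SSHClient()
+ client.load_system_host_keys()
+ client.connect("ssh.example.com", username="alice", password="secret")
+ stdin, stdout, stderr = client.exec_command("uptime")
+ print(stdout.read().decode())
+ client.close()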
+ """
+ if not sock:
+ errors = {}
+ # Try multiple possible address families (e.g. IPv4 vs IPv6)
+ to_try = list(self._families_and_addresses(hostname, port))
+ for af, addr in to_try:
+ try:
+ sock = socket.socket(af, socket.SOCK_STREAM)
+ if timeout is not None:
+ try:
+ sock.settimeout(timeout)
+ except:
+ pass
+ sock.connect(addr)
+ # Break out of the loop on success
+ break
+ except socket.error as e:
+ # As mentioned in socket docs it is better
+ # to close sockets explicitly
+ if sock:
+ sock.close()
+ # Raise anything that isn't a straight up connection error
+ # (such as a resolution error)
+ if e.errno not in (ECONNREFUSED, EHOSTUNREACH):
+ raise
+ # Capture anything else so we know how the run looks once
+ # iteration is complete. Retain info about which attempt
+ # this was.
+ errors[addr] = e
+
+ # Make sure we explode usefully if no address family attempts
+ # succeeded. We've no way of knowing which error is the "right"
+ # one, so we construct a hybrid exception containing all the real
+ # ones, of a subclass that client code should still be watching for
+ # (socket.error)
+ if len(errors) == len(to_try):
+ raise NoValidConnectionsError(errors)
+
+ if transport_factory is None:
+ transport_factory = Transport
+ t = self._transport = transport_factory(
+ sock,
+ gss_kex=gss_kex,
+ gss_deleg_creds=gss_deleg_creds,
+ disabled_algorithms=disabled_algorithms,
+ )
+ t.use_compression(compress=compress)
+ t.set_gss_host(
+ # t.hostname may be None, but GSS-API requires a target name.
+ # Therefore use hostname as fallback.
+ gss_host=gss_host or hostname,
+ trust_dns=gss_trust_dns,
+ gssapi_requested=gss_auth or gss_kex,
+ )
+ if self._log_channel is not None:
+ t.set_log_channel(self._log_channel)
+ if banner_timeout is not None:
+ t.banner_timeout = banner_timeout
+ if auth_timeout is not None:
+ t.auth_timeout = auth_timeout
+ if channel_timeout is not None:
+ t.channel_timeout = channel_timeout
+
+ if port == SSH_PORT:
+ server_hostkey_name = hostname
+ else:
+ server_hostkey_name = "[{}]:{}".format(hostname, port)
+ our_server_keys = None
+
+ our_server_keys = self._system_host_keys.get(server_hostkey_name)
+ if our_server_keys is None:
+ our_server_keys = self._host_keys.get(server_hostkey_name)
+ if our_server_keys is not None:
+ keytype = our_server_keys.keys()[0]
+ sec_opts = t.get_security_options()
+ other_types = [x for x in sec_opts.key_types if x != keytype]
+ sec_opts.key_types = [keytype] + other_types
+
+ t.start_client(timeout=timeout)
+
+ # If GSS-API Key Exchange is performed we are not required to check the
+ # host key, because the host is authenticated via GSS-API / SSPI as
+ # well as our client.
+ if not self._transport.gss_kex_used:
+ server_key = t.get_remote_server_key()
+ if our_server_keys is None:
+ # will raise exception if the key is rejected
+ self._policy.missing_host_key(
+ self, server_hostkey_name, server_key
+ )
+ else:
+ our_key = our_server_keys.get(server_key.get_name())
+ if our_key != server_key:
+ if our_key is None:
+ our_key = list(our_server_keys.values())[0]
+ raise BadHostKeyException(hostname, server_key, our_key)
+
+ if username is None:
+ username = getpass.getuser()
+
+ # New auth flow!
+ if auth_strategy is not None:
+ return auth_strategy.authenticate(transport=t)
+
+ # Old auth flow!
+ if key_filename is None:
+ key_filenames = []
+ elif isinstance(key_filename, str):
+ key_filenames = [key_filename]
+ else:
+ key_filenames = key_filename
+
+ self._auth(
+ username,
+ password,
+ pkey,
+ key_filenames,
+ allow_agent,
+ look_for_keys,
+ gss_auth,
+ gss_kex,
+ gss_deleg_creds,
+ t.gss_host,
+ passphrase,
+ )
+
+ def close(self):
+ """
+ Close this SSHClient and its underlying `.Transport`.
+
+ This should be called anytime you are done using the client object.
+
+ .. warning::
+ Paramiko registers garbage collection hooks that will try to
+ automatically close connections for you, but this is not presently
+ reliable. Failure to explicitly close your client after use may
+ lead to end-of-process hangs!
+ """
+ if self._transport is None:
+ return
+ self._transport.close()
+ self._transport = None
+
+ if self._agent is not None:
+ self._agent.close()
+ self._agent = None
+
+ def exec_command(
+ self,
+ command,
+ bufsize=-1,
+ timeout=None,
+ get_pty=False,
+ environment=None,
+ ):
+ """
+ Execute a command on the SSH server. A new `.Channel` is opened and
+ the requested command is executed. The command's input and output
+ streams are returned as Python ``file``-like objects representing
+ stdin, stdout, and stderr.
+
+ :param str command: the command to execute
+ :param int bufsize:
+ interpreted the same way as by the built-in ``open()`` function in
+ Python
+ :param int timeout:
+ set command's channel timeout. See `.Channel.settimeout`
+ :param bool get_pty:
+ Request a pseudo-terminal from the server (default ``False``).
+ See `.Channel.get_pty`
+ :param dict environment:
+ a dict of shell environment variables, to be merged into the
+ default environment that the remote command executes within.
+
+ .. warning::
+ Servers may silently reject some environment variables; see the
+ warning in `.Channel.set_environment_variable` for details.
+
+ :return:
+ the stdin, stdout, and stderr of the executing command, as a
+ 3-tuple
+
+ :raises: `.SSHException` -- if the server fails to execute the command
+
+ .. versionchanged:: 1.10
+ Added the ``get_pty`` kwarg.
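+
+ For example (assuming ``client`` is an already-connected `.SSHClient`)::
+
+ stdin, stdout, stderr = client.exec_command("ls -l /tmp")
+ print(stdout.read().decode())
+ print("exit status:", stdout.channel.recv_exit_status())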
+ """
+ chan = self._transport.open_session(timeout=timeout)
+ if get_pty:
+ chan.get_pty()
+ chan.settimeout(timeout)
+ if environment:
+ chan.update_environment(environment)
+ chan.exec_command(command)
+ stdin = chan.makefile_stdin("wb", bufsize)
+ stdout = chan.makefile("r", bufsize)
+ stderr = chan.makefile_stderr("r", bufsize)
+ return stdin, stdout, stderr
+
+ def invoke_shell(
+ self,
+ term="vt100",
+ width=80,
+ height=24,
+ width_pixels=0,
+ height_pixels=0,
+ environment=None,
+ ):
+ """
+ Start an interactive shell session on the SSH server. A new `.Channel`
+ is opened and connected to a pseudo-terminal using the requested
+ terminal type and size.
+
+ :param str term:
+ the terminal type to emulate (for example, ``"vt100"``)
+ :param int width: the width (in characters) of the terminal window
+ :param int height: the height (in characters) of the terminal window
+ :param int width_pixels: the width (in pixels) of the terminal window
+ :param int height_pixels: the height (in pixels) of the terminal window
+ :param dict environment: the command's environment
+ :return: a new `.Channel` connected to the remote shell
+
+ :raises: `.SSHException` -- if the server fails to invoke a shell
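+
+ For example (assuming ``client`` is an already-connected `.SSHClient`;
+ real code would typically read in a loop rather than a single ``recv``)::
+
+ chan = client.invoke_shell(term="xterm", width=120, height=40)
+ chan.send(b"echo hello\n")
+ output = chan.recv(4096)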
+ """
+ chan = self._transport.open_session()
+ chan.get_pty(term, width, height, width_pixels, height_pixels)
+ chan.invoke_shell()
+ return chan
+
+ def open_sftp(self):
+ """
+ Open an SFTP session on the SSH server.
+
+ :return: a new `.SFTPClient` session object
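+
+ For example (assuming ``client`` is an already-connected `.SSHClient`;
+ both paths are purely illustrative)::
+
+ sftp = client.open_sftp()
+ sftp.put("local-file.txt", "/tmp/remote-copy.txt")
+ sftp.close()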
+ """
+ return self._transport.open_sftp_client()
+
+ def get_transport(self):
+ """
+ Return the underlying `.Transport` object for this SSH connection.
+ This can be used to perform lower-level tasks, like opening specific
+ kinds of channels.
+
+ :return: the `.Transport` for this connection
+ """
+ return self._transport
+
+ def _key_from_filepath(self, filename, klass, password):
+ """
+ Attempt to derive a `.PKey` from given string path ``filename``:
+
+ - If ``filename`` appears to be a cert, the matching private key is
+ loaded.
+ - Otherwise, the filename is assumed to be a private key, and the
+ matching public cert will be loaded if it exists.
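+
+ For example, a hypothetical ``/home/alice/.ssh/id_rsa`` (optionally with a
+ ``/home/alice/.ssh/id_rsa-cert.pub`` next to it) would be loaded as::
+
+ key = self._key_from_filepath("/home/alice/.ssh/id_rsa", RSAKey, None)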
+ """
+ cert_suffix = "-cert.pub"
+ # Assume privkey, not cert, by default
+ if filename.endswith(cert_suffix):
+ key_path = filename[: -len(cert_suffix)]
+ cert_path = filename
+ else:
+ key_path = filename
+ cert_path = filename + cert_suffix
+ # Blindly try the key path; if no private key, nothing will work.
+ key = klass.from_private_key_file(key_path, password)
+ # TODO: change this to 'Loading' instead of 'Trying' sometime; probably
+ # when #387 is released, since this is a critical log message users are
+ # likely testing/filtering for (bah.)
+ msg = "Trying discovered key {} in {}".format(
+ hexlify(key.get_fingerprint()), key_path
+ )
+ self._log(DEBUG, msg)
+ # Attempt to load cert if it exists.
+ if os.path.isfile(cert_path):
+ key.load_certificate(cert_path)
+ self._log(DEBUG, "Adding public certificate {}".format(cert_path))
+ return key
+
+ def _auth(
+ self,
+ username,
+ password,
+ pkey,
+ key_filenames,
+ allow_agent,
+ look_for_keys,
+ gss_auth,
+ gss_kex,
+ gss_deleg_creds,
+ gss_host,
+ passphrase,
+ ):
+ """
+ Try, in order:
+
+ - The key(s) passed in, if one was passed in.
+ - Any key we can find through an SSH agent (if allowed).
+ - Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in ~/.ssh/
+ (if allowed).
+ - Plain username/password auth, if a password was given.
+
+ (The password might be needed to unlock a private key [if 'passphrase'
+ isn't also given], or for two-factor authentication [for which it is
+ required].)
+ """
+ saved_exception = None
+ two_factor = False
+ allowed_types = set()
+ two_factor_types = {"keyboard-interactive", "password"}
+ if passphrase is None and password is not None:
+ passphrase = password
+
+ # If GSS-API is supported and GSS-API Key Exchange was performed, we
+ # attempt authentication with gssapi-keyex.
+ if gss_kex and self._transport.gss_kex_used:
+ try:
+ self._transport.auth_gssapi_keyex(username)
+ return
+ except Exception as e:
+ saved_exception = e
+
+ # Try GSS-API authentication (gssapi-with-mic) only if GSS-API Key
+ # Exchange is not performed, because if we use GSS-API for the key
+ # exchange, there is already a fully established GSS-API context, so
+ # why should we do that again?
+ if gss_auth:
+ try:
+ return self._transport.auth_gssapi_with_mic(
+ username, gss_host, gss_deleg_creds
+ )
+ except Exception as e:
+ saved_exception = e
+
+ if pkey is not None:
+ try:
+ self._log(
+ DEBUG,
+ "Trying SSH key {}".format(
+ hexlify(pkey.get_fingerprint())
+ ),
+ )
+ allowed_types = set(
+ self._transport.auth_publickey(username, pkey)
+ )
+ two_factor = allowed_types & two_factor_types
+ if not two_factor:
+ return
+ except SSHException as e:
+ saved_exception = e
+
+ if not two_factor:
+ for key_filename in key_filenames:
+ # TODO 4.0: leverage PKey.from_path() if we don't end up just
+ # killing SSHClient entirely
+ for pkey_class in (RSAKey, DSSKey, ECDSAKey, Ed25519Key):
+ try:
+ key = self._key_from_filepath(
+ key_filename, pkey_class, passphrase
+ )
+ allowed_types = set(
+ self._transport.auth_publickey(username, key)
+ )
+ two_factor = allowed_types & two_factor_types
+ if not two_factor:
+ return
+ break
+ except SSHException as e:
+ saved_exception = e
+
+ if not two_factor and allow_agent:
+ if self._agent is None:
+ self._agent = Agent()
+
+ for key in self._agent.get_keys():
+ try:
+ id_ = hexlify(key.get_fingerprint())
+ self._log(DEBUG, "Trying SSH agent key {}".format(id_))
+ # for 2-factor auth, a successfully auth'd key will return an
+ # allowed 2fac auth method
+ allowed_types = set(
+ self._transport.auth_publickey(username, key)
+ )
+ two_factor = allowed_types & two_factor_types
+ if not two_factor:
+ return
+ break
+ except SSHException as e:
+ saved_exception = e
+
+ if not two_factor:
+ keyfiles = []
+
+ for keytype, name in [
+ (RSAKey, "rsa"),
+ (DSSKey, "dsa"),
+ (ECDSAKey, "ecdsa"),
+ (Ed25519Key, "ed25519"),
+ ]:
+ # ~/ssh/ is for Windows
+ for directory in [".ssh", "ssh"]:
+ full_path = os.path.expanduser(
+ "~/{}/id_{}".format(directory, name)
+ )
+ if os.path.isfile(full_path):
+ # TODO: only do this append if below did not run
+ keyfiles.append((keytype, full_path))
+ if os.path.isfile(full_path + "-cert.pub"):
+ keyfiles.append((keytype, full_path + "-cert.pub"))
+
+ if not look_for_keys:
+ keyfiles = []
+
+ for pkey_class, filename in keyfiles:
+ try:
+ key = self._key_from_filepath(
+ filename, pkey_class, passphrase
+ )
+ # for 2-factor auth a successfully auth'd key will result
+ # in ['password']
+ allowed_types = set(
+ self._transport.auth_publickey(username, key)
+ )
+ two_factor = allowed_types & two_factor_types
+ if not two_factor:
+ return
+ break
+ except (SSHException, IOError) as e:
+ saved_exception = e
+
+ if password is not None:
+ try:
+ self._transport.auth_password(username, password)
+ return
+ except SSHException as e:
+ saved_exception = e
+ elif two_factor:
+ try:
+ self._transport.auth_interactive_dumb(username)
+ return
+ except SSHException as e:
+ saved_exception = e
+
+ # if we got an auth-failed exception earlier, re-raise it
+ if saved_exception is not None:
+ raise saved_exception
+ raise SSHException("No authentication methods available")
+
+ def _log(self, level, msg):
+ self._transport._log(level, msg)
+
+
+class MissingHostKeyPolicy:
+ """
+ Interface for defining the policy that `.SSHClient` should use when the
+ SSH server's hostname is not in either the system host keys or the
+ application's keys. Pre-made classes implement policies for automatically
+ adding the key to the application's `.HostKeys` object (`.AutoAddPolicy`),
+ and for automatically rejecting the key (`.RejectPolicy`).
+
+ A custom subclass's `missing_host_key` method may, for example, ask the
+ user to verify the key.
+ """
+
+ def missing_host_key(self, client, hostname, key):
+ """
+ Called when an `.SSHClient` receives a server key for a server that
+ isn't in either the system or local `.HostKeys` object. To accept
+ the key, simply return. To reject, raise an exception (which will
+ be passed to the calling application).
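+
+ A minimal custom-policy sketch (illustrative only; it prompts on stdin,
+ so it is only suitable for interactive programs)::
+
+ class PromptPolicy(MissingHostKeyPolicy):
+     def missing_host_key(self, client, hostname, key):
+         answer = input("Unknown host {}; trust it? [y/N] ".format(hostname))
+         if answer.lower() != "y":
+             raise SSHException("Host key for {} rejected".format(hostname))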
+ """
+ pass
+
+
+class AutoAddPolicy(MissingHostKeyPolicy):
+ """
+ Policy for automatically adding the hostname and new host key to the
+ local `.HostKeys` object, and saving it. This is used by `.SSHClient`.
+ """
+
+ def missing_host_key(self, client, hostname, key):
+ client._host_keys.add(hostname, key.get_name(), key)
+ if client._host_keys_filename is not None:
+ client.save_host_keys(client._host_keys_filename)
+ client._log(
+ DEBUG,
+ "Adding {} host key for {}: {}".format(
+ key.get_name(), hostname, hexlify(key.get_fingerprint())
+ ),
+ )
+
+
+class RejectPolicy(MissingHostKeyPolicy):
+ """
+ Policy for automatically rejecting the unknown hostname & key. This is
+ used by `.SSHClient`.
+ """
+
+ def missing_host_key(self, client, hostname, key):
+ client._log(
+ DEBUG,
+ "Rejecting {} host key for {}: {}".format(
+ key.get_name(), hostname, hexlify(key.get_fingerprint())
+ ),
+ )
+ raise SSHException(
+ "Server {!r} not found in known_hosts".format(hostname)
+ )
+
+
+class WarningPolicy(MissingHostKeyPolicy):
+ """
+ Policy for logging a Python-style warning for an unknown host key, but
+ accepting it. This is used by `.SSHClient`.
+ """
+
+ def missing_host_key(self, client, hostname, key):
+ warnings.warn(
+ "Unknown {} host key for {}: {}".format(
+ key.get_name(), hostname, hexlify(key.get_fingerprint())
+ )
+ )
diff --git a/paramiko/common.py b/paramiko/common.py
new file mode 100644
index 0000000..b57149b
--- /dev/null
+++ b/paramiko/common.py
@@ -0,0 +1,245 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Common constants and global variables.
+"""
+import logging
+import struct
+
+#
+# Formerly of py3compat.py. May be fully deletable after a deeper look?
+#
+
+
+def byte_chr(c):
+ assert isinstance(c, int)
+ return struct.pack("B", c)
+
+
+def byte_mask(c, mask):
+ assert isinstance(c, int)
+ return struct.pack("B", c & mask)
+
+
+def byte_ord(c):
+ # In case we're handed a string instead of an int.
+ if not isinstance(c, int):
+ c = ord(c)
+ return c
+
+
+(
+ MSG_DISCONNECT,
+ MSG_IGNORE,
+ MSG_UNIMPLEMENTED,
+ MSG_DEBUG,
+ MSG_SERVICE_REQUEST,
+ MSG_SERVICE_ACCEPT,
+ MSG_EXT_INFO,
+) = range(1, 8)
+(MSG_KEXINIT, MSG_NEWKEYS) = range(20, 22)
+(
+ MSG_USERAUTH_REQUEST,
+ MSG_USERAUTH_FAILURE,
+ MSG_USERAUTH_SUCCESS,
+ MSG_USERAUTH_BANNER,
+) = range(50, 54)
+MSG_USERAUTH_PK_OK = 60
+(MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE) = range(60, 62)
+(MSG_USERAUTH_GSSAPI_RESPONSE, MSG_USERAUTH_GSSAPI_TOKEN) = range(60, 62)
+(
+ MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE,
+ MSG_USERAUTH_GSSAPI_ERROR,
+ MSG_USERAUTH_GSSAPI_ERRTOK,
+ MSG_USERAUTH_GSSAPI_MIC,
+) = range(63, 67)
+HIGHEST_USERAUTH_MESSAGE_ID = 79
+(MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE) = range(80, 83)
+(
+ MSG_CHANNEL_OPEN,
+ MSG_CHANNEL_OPEN_SUCCESS,
+ MSG_CHANNEL_OPEN_FAILURE,
+ MSG_CHANNEL_WINDOW_ADJUST,
+ MSG_CHANNEL_DATA,
+ MSG_CHANNEL_EXTENDED_DATA,
+ MSG_CHANNEL_EOF,
+ MSG_CHANNEL_CLOSE,
+ MSG_CHANNEL_REQUEST,
+ MSG_CHANNEL_SUCCESS,
+ MSG_CHANNEL_FAILURE,
+) = range(90, 101)
+
+cMSG_DISCONNECT = byte_chr(MSG_DISCONNECT)
+cMSG_IGNORE = byte_chr(MSG_IGNORE)
+cMSG_UNIMPLEMENTED = byte_chr(MSG_UNIMPLEMENTED)
+cMSG_DEBUG = byte_chr(MSG_DEBUG)
+cMSG_SERVICE_REQUEST = byte_chr(MSG_SERVICE_REQUEST)
+cMSG_SERVICE_ACCEPT = byte_chr(MSG_SERVICE_ACCEPT)
+cMSG_EXT_INFO = byte_chr(MSG_EXT_INFO)
+cMSG_KEXINIT = byte_chr(MSG_KEXINIT)
+cMSG_NEWKEYS = byte_chr(MSG_NEWKEYS)
+cMSG_USERAUTH_REQUEST = byte_chr(MSG_USERAUTH_REQUEST)
+cMSG_USERAUTH_FAILURE = byte_chr(MSG_USERAUTH_FAILURE)
+cMSG_USERAUTH_SUCCESS = byte_chr(MSG_USERAUTH_SUCCESS)
+cMSG_USERAUTH_BANNER = byte_chr(MSG_USERAUTH_BANNER)
+cMSG_USERAUTH_PK_OK = byte_chr(MSG_USERAUTH_PK_OK)
+cMSG_USERAUTH_INFO_REQUEST = byte_chr(MSG_USERAUTH_INFO_REQUEST)
+cMSG_USERAUTH_INFO_RESPONSE = byte_chr(MSG_USERAUTH_INFO_RESPONSE)
+cMSG_USERAUTH_GSSAPI_RESPONSE = byte_chr(MSG_USERAUTH_GSSAPI_RESPONSE)
+cMSG_USERAUTH_GSSAPI_TOKEN = byte_chr(MSG_USERAUTH_GSSAPI_TOKEN)
+cMSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE = byte_chr(
+ MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE
+)
+cMSG_USERAUTH_GSSAPI_ERROR = byte_chr(MSG_USERAUTH_GSSAPI_ERROR)
+cMSG_USERAUTH_GSSAPI_ERRTOK = byte_chr(MSG_USERAUTH_GSSAPI_ERRTOK)
+cMSG_USERAUTH_GSSAPI_MIC = byte_chr(MSG_USERAUTH_GSSAPI_MIC)
+cMSG_GLOBAL_REQUEST = byte_chr(MSG_GLOBAL_REQUEST)
+cMSG_REQUEST_SUCCESS = byte_chr(MSG_REQUEST_SUCCESS)
+cMSG_REQUEST_FAILURE = byte_chr(MSG_REQUEST_FAILURE)
+cMSG_CHANNEL_OPEN = byte_chr(MSG_CHANNEL_OPEN)
+cMSG_CHANNEL_OPEN_SUCCESS = byte_chr(MSG_CHANNEL_OPEN_SUCCESS)
+cMSG_CHANNEL_OPEN_FAILURE = byte_chr(MSG_CHANNEL_OPEN_FAILURE)
+cMSG_CHANNEL_WINDOW_ADJUST = byte_chr(MSG_CHANNEL_WINDOW_ADJUST)
+cMSG_CHANNEL_DATA = byte_chr(MSG_CHANNEL_DATA)
+cMSG_CHANNEL_EXTENDED_DATA = byte_chr(MSG_CHANNEL_EXTENDED_DATA)
+cMSG_CHANNEL_EOF = byte_chr(MSG_CHANNEL_EOF)
+cMSG_CHANNEL_CLOSE = byte_chr(MSG_CHANNEL_CLOSE)
+cMSG_CHANNEL_REQUEST = byte_chr(MSG_CHANNEL_REQUEST)
+cMSG_CHANNEL_SUCCESS = byte_chr(MSG_CHANNEL_SUCCESS)
+cMSG_CHANNEL_FAILURE = byte_chr(MSG_CHANNEL_FAILURE)
+
+# for debugging:
+MSG_NAMES = {
+ MSG_DISCONNECT: "disconnect",
+ MSG_IGNORE: "ignore",
+ MSG_UNIMPLEMENTED: "unimplemented",
+ MSG_DEBUG: "debug",
+ MSG_SERVICE_REQUEST: "service-request",
+ MSG_SERVICE_ACCEPT: "service-accept",
+ MSG_KEXINIT: "kexinit",
+ MSG_EXT_INFO: "ext-info",
+ MSG_NEWKEYS: "newkeys",
+ 30: "kex30",
+ 31: "kex31",
+ 32: "kex32",
+ 33: "kex33",
+ 34: "kex34",
+ 40: "kex40",
+ 41: "kex41",
+ MSG_USERAUTH_REQUEST: "userauth-request",
+ MSG_USERAUTH_FAILURE: "userauth-failure",
+ MSG_USERAUTH_SUCCESS: "userauth-success",
+ MSG_USERAUTH_BANNER: "userauth--banner",
+ MSG_USERAUTH_PK_OK: "userauth-60(pk-ok/info-request)",
+ MSG_USERAUTH_INFO_RESPONSE: "userauth-info-response",
+ MSG_GLOBAL_REQUEST: "global-request",
+ MSG_REQUEST_SUCCESS: "request-success",
+ MSG_REQUEST_FAILURE: "request-failure",
+ MSG_CHANNEL_OPEN: "channel-open",
+ MSG_CHANNEL_OPEN_SUCCESS: "channel-open-success",
+ MSG_CHANNEL_OPEN_FAILURE: "channel-open-failure",
+ MSG_CHANNEL_WINDOW_ADJUST: "channel-window-adjust",
+ MSG_CHANNEL_DATA: "channel-data",
+ MSG_CHANNEL_EXTENDED_DATA: "channel-extended-data",
+ MSG_CHANNEL_EOF: "channel-eof",
+ MSG_CHANNEL_CLOSE: "channel-close",
+ MSG_CHANNEL_REQUEST: "channel-request",
+ MSG_CHANNEL_SUCCESS: "channel-success",
+ MSG_CHANNEL_FAILURE: "channel-failure",
+ MSG_USERAUTH_GSSAPI_RESPONSE: "userauth-gssapi-response",
+ MSG_USERAUTH_GSSAPI_TOKEN: "userauth-gssapi-token",
+ MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE: "userauth-gssapi-exchange-complete",
+ MSG_USERAUTH_GSSAPI_ERROR: "userauth-gssapi-error",
+ MSG_USERAUTH_GSSAPI_ERRTOK: "userauth-gssapi-error-token",
+ MSG_USERAUTH_GSSAPI_MIC: "userauth-gssapi-mic",
+}
+
+
+# authentication request return codes:
+AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED = range(3)
+
+
+# channel request failed reasons:
+(
+ OPEN_SUCCEEDED,
+ OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
+ OPEN_FAILED_CONNECT_FAILED,
+ OPEN_FAILED_UNKNOWN_CHANNEL_TYPE,
+ OPEN_FAILED_RESOURCE_SHORTAGE,
+) = range(0, 5)
+
+
+CONNECTION_FAILED_CODE = {
+ 1: "Administratively prohibited",
+ 2: "Connect failed",
+ 3: "Unknown channel type",
+ 4: "Resource shortage",
+}
+
+
+(
+ DISCONNECT_SERVICE_NOT_AVAILABLE,
+ DISCONNECT_AUTH_CANCELLED_BY_USER,
+ DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
+) = (7, 13, 14)
+
+zero_byte = byte_chr(0)
+one_byte = byte_chr(1)
+four_byte = byte_chr(4)
+max_byte = byte_chr(0xFF)
+cr_byte = byte_chr(13)
+linefeed_byte = byte_chr(10)
+crlf = cr_byte + linefeed_byte
+cr_byte_value = 13
+linefeed_byte_value = 10
+
+
+xffffffff = 0xFFFFFFFF
+x80000000 = 0x80000000
+o666 = 438
+o660 = 432
+o644 = 420
+o600 = 384
+o777 = 511
+o700 = 448
+o70 = 56
+
+DEBUG = logging.DEBUG
+INFO = logging.INFO
+WARNING = logging.WARNING
+ERROR = logging.ERROR
+CRITICAL = logging.CRITICAL
+
+# Common IO/select/etc sleep period, in seconds
+io_sleep = 0.01
+
+DEFAULT_WINDOW_SIZE = 64 * 2**15
+DEFAULT_MAX_PACKET_SIZE = 2**15
+
+# lower bound on the max packet size we'll accept from the remote host
+# Minimum packet size is 32768 bytes according to
+# http://www.ietf.org/rfc/rfc4254.txt
+MIN_WINDOW_SIZE = 2**15
+
+# However, according to http://www.ietf.org/rfc/rfc4253.txt it is perfectly
+# legal to accept a much smaller size, as the OpenSSH client does (16384).
+MIN_PACKET_SIZE = 2**12
+
+# Max window size according to http://www.ietf.org/rfc/rfc4254.txt
+MAX_WINDOW_SIZE = 2**32 - 1
diff --git a/paramiko/compress.py b/paramiko/compress.py
new file mode 100644
index 0000000..18ff484
--- /dev/null
+++ b/paramiko/compress.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Compression implementations for a Transport.
+"""
+
+import zlib
+
+
+class ZlibCompressor:
+ def __init__(self):
+ # Use the default level of zlib compression
+ self.z = zlib.compressobj()
+
+ def __call__(self, data):
+ return self.z.compress(data) + self.z.flush(zlib.Z_FULL_FLUSH)
+
+
+class ZlibDecompressor:
+ def __init__(self):
+ self.z = zlib.decompressobj()
+
+ def __call__(self, data):
+ return self.z.decompress(data)
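+
+
+# A minimal round-trip sketch (illustrative only; not used by the library).
+# Because ZlibCompressor flushes with Z_FULL_FLUSH after every call, each
+# compressed chunk can be fed straight to a streaming ZlibDecompressor:
+def _roundtrip_example(data=b"example payload"):
+    compressor = ZlibCompressor()
+    decompressor = ZlibDecompressor()
+    return decompressor(compressor(data)) == data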
diff --git a/paramiko/config.py b/paramiko/config.py
new file mode 100644
index 0000000..8ab55c6
--- /dev/null
+++ b/paramiko/config.py
@@ -0,0 +1,696 @@
+# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
+# Copyright (C) 2012 Olle Lundberg <geek@nerd.sh>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Configuration file (aka ``ssh_config``) support.
+"""
+
+import fnmatch
+import getpass
+import os
+import re
+import shlex
+import socket
+from hashlib import sha1
+from io import StringIO
+from functools import partial
+
+invoke, invoke_import_error = None, None
+try:
+ import invoke
+except ImportError as e:
+ invoke_import_error = e
+
+from .ssh_exception import CouldNotCanonicalize, ConfigParseError
+
+
+SSH_PORT = 22
+
+
+class SSHConfig:
+ """
+ Representation of config information as stored in the format used by
+ OpenSSH. Queries can be made via `lookup`. The format is described in
+ OpenSSH's ``ssh_config`` man page. This class is provided primarily as a
+ convenience to POSIX users (since the OpenSSH format is a de facto
+ standard on POSIX) but should work fine on Windows too.
+
+ .. versionadded:: 1.6
+ """
+
+ SETTINGS_REGEX = re.compile(r"(\w+)(?:\s*=\s*|\s+)(.+)")
+
+ # TODO: do a full scan of ssh.c & friends to make sure we're fully
+ # compatible across the board, e.g. OpenSSH 8.1 added %n to ProxyCommand.
+ TOKENS_BY_CONFIG_KEY = {
+ "controlpath": ["%C", "%h", "%l", "%L", "%n", "%p", "%r", "%u"],
+ "hostname": ["%h"],
+ "identityfile": ["%C", "~", "%d", "%h", "%l", "%u", "%r"],
+ "proxycommand": ["~", "%h", "%p", "%r"],
+ "proxyjump": ["%h", "%p", "%r"],
+ # Doesn't seem worth making this 'special' for now, it will fit well
+ # enough (no actual match-exec config key to be confused with).
+ "match-exec": ["%C", "%d", "%h", "%L", "%l", "%n", "%p", "%r", "%u"],
+ }
+
+ def __init__(self):
+ """
+ Create a new OpenSSH config object.
+
+ Note: the newer alternate constructors `from_path`, `from_file` and
+ `from_text` are simpler to use, as they parse on instantiation. For
+ example, instead of::
+
+ config = SSHConfig()
+ config.parse(open("some-path.config")
+
+ you could::
+
+ config = SSHConfig.from_file(open("some-path.config"))
+ # Or more directly:
+ config = SSHConfig.from_path("some-path.config")
+ # Or if you have arbitrary ssh_config text from some other source:
+ config = SSHConfig.from_text("Host foo\\n\\tUser bar")
+ """
+ self._config = []
+
+ @classmethod
+ def from_text(cls, text):
+ """
+ Create a new, parsed `SSHConfig` from ``text`` string.
+
+ .. versionadded:: 2.7
+ """
+ return cls.from_file(StringIO(text))
+
+ @classmethod
+ def from_path(cls, path):
+ """
+ Create a new, parsed `SSHConfig` from the file found at ``path``.
+
+ .. versionadded:: 2.7
+ """
+ with open(path) as flo:
+ return cls.from_file(flo)
+
+ @classmethod
+ def from_file(cls, flo):
+ """
+ Create a new, parsed `SSHConfig` from file-like object ``flo``.
+
+ .. versionadded:: 2.7
+ """
+ obj = cls()
+ obj.parse(flo)
+ return obj
+
+ def parse(self, file_obj):
+ """
+ Read an OpenSSH config from the given file object.
+
+ :param file_obj: a file-like object to read the config file from
+ """
+ # Start out w/ implicit/anonymous global host-like block to hold
+ # anything not contained by an explicit one.
+ context = {"host": ["*"], "config": {}}
+ for line in file_obj:
+ # Strip any leading or trailing whitespace from the line.
+ # Refer to https://github.com/paramiko/paramiko/issues/499
+ line = line.strip()
+ # Skip blanks, comments
+ if not line or line.startswith("#"):
+ continue
+
+ # Parse line into key, value
+ match = re.match(self.SETTINGS_REGEX, line)
+ if not match:
+ raise ConfigParseError("Unparsable line {}".format(line))
+ key = match.group(1).lower()
+ value = match.group(2)
+
+ # Host keyword triggers switch to new block/context
+ if key in ("host", "match"):
+ self._config.append(context)
+ context = {"config": {}}
+ if key == "host":
+ # TODO 4.0: make these real objects or at least name this
+ # "hosts" to acknowledge it's an iterable. (Doing so prior
+ # to 3.0, despite it being a private API, feels bad -
+ # surely such an old codebase has folks actually relying on
+ # these keys.)
+ context["host"] = self._get_hosts(value)
+ else:
+ context["matches"] = self._get_matches(value)
+ # Special-case for noop ProxyCommands
+ elif key == "proxycommand" and value.lower() == "none":
+ # Store 'none' as None - not as a string implying that the
+ # proxycommand is the literal shell command "none"!
+ context["config"][key] = None
+ # All other keywords get stored, directly or via append
+ else:
+ if value.startswith('"') and value.endswith('"'):
+ value = value[1:-1]
+
+ # identityfile, localforward, remoteforward keys are special
+ # cases, since they are allowed to be specified multiple times
+ # and they should be tried in order of specification.
+ if key in ["identityfile", "localforward", "remoteforward"]:
+ if key in context["config"]:
+ context["config"][key].append(value)
+ else:
+ context["config"][key] = [value]
+ elif key not in context["config"]:
+ context["config"][key] = value
+ # Store last 'open' block and we're done
+ self._config.append(context)
+
+ def lookup(self, hostname):
+ """
+ Return a dict (`SSHConfigDict`) of config options for a given hostname.
+
+ The host-matching rules of OpenSSH's ``ssh_config`` man page are used:
+ For each parameter, the first obtained value will be used. The
+ configuration files contain sections separated by ``Host`` and/or
+ ``Match`` specifications, and that section is only applied for hosts
+ which match the given patterns or keywords
+
+ Since the first obtained value for each parameter is used, more host-
+ specific declarations should be given near the beginning of the file,
+ and general defaults at the end.
+
+ The keys in the returned dict are all normalized to lowercase (look for
+ ``"port"``, not ``"Port"``. The values are processed according to the
+ rules for substitution variable expansion in ``ssh_config``.
+
+ Finally, please see the docs for `SSHConfigDict` for deeper info on
+ features such as optional type conversion methods, e.g.::
+
+ conf = my_config.lookup('myhost')
+ assert conf['passwordauthentication'] == 'yes'
+ assert conf.as_bool('passwordauthentication') is True
+
+ .. note::
+ If there is no explicitly configured ``HostName`` value, it will be
+ set to the hostname being looked up, which is as close as we can
+ get to OpenSSH's behavior around that particular option.
+
+ :param str hostname: the hostname to look up
+
+ .. versionchanged:: 2.5
+ Returns `SSHConfigDict` objects instead of dict literals.
+ .. versionchanged:: 2.7
+ Added canonicalization support.
+ .. versionchanged:: 2.7
+ Added ``Match`` support.
+ .. versionchanged:: 3.3
+ Added ``Match final`` support.
+ """
+ # First pass
+ options = self._lookup(hostname=hostname)
+ # Inject HostName if it was not set (this used to be done incidentally
+ # during tokenization, for some reason).
+ if "hostname" not in options:
+ options["hostname"] = hostname
+ # Handle canonicalization
+ canon = options.get("canonicalizehostname", None) in ("yes", "always")
+ maxdots = int(options.get("canonicalizemaxdots", 1))
+ if canon and hostname.count(".") <= maxdots:
+ # NOTE: OpenSSH manpage does not explicitly state this, but its
+ # implementation for CanonicalDomains is 'split on any whitespace'.
+ domains = options["canonicaldomains"].split()
+ hostname = self.canonicalize(hostname, options, domains)
+ # Overwrite HostName again here (this is also what OpenSSH does)
+ options["hostname"] = hostname
+ options = self._lookup(
+ hostname, options, canonical=True, final=True
+ )
+ else:
+ options = self._lookup(
+ hostname, options, canonical=False, final=True
+ )
+ return options
+
+ def _lookup(self, hostname, options=None, canonical=False, final=False):
+ # Init
+ if options is None:
+ options = SSHConfigDict()
+ # Iterate all stanzas, applying any that match, in turn (so that things
+ # like Match can reference currently understood state)
+ for context in self._config:
+ if not (
+ self._pattern_matches(context.get("host", []), hostname)
+ or self._does_match(
+ context.get("matches", []),
+ hostname,
+ canonical,
+ final,
+ options,
+ )
+ ):
+ continue
+ for key, value in context["config"].items():
+ if key not in options:
+ # Create a copy of the original value,
+ # else it will reference the original list
+ # in self._config and update that value too
+ # when extend() is called.
+ options[key] = value[:] if value is not None else value
+ elif key == "identityfile":
+ options[key].extend(
+ x for x in value if x not in options[key]
+ )
+ if final:
+ # Expand variables in resulting values
+ # (besides 'Match exec' which was already handled above)
+ options = self._expand_variables(options, hostname)
+ return options
+
+ def canonicalize(self, hostname, options, domains):
+ """
+ Return canonicalized version of ``hostname``.
+
+ :param str hostname: Target hostname.
+ :param options: An `SSHConfigDict` from a previous lookup pass.
+ :param domains: List of domains (e.g. ``["paramiko.org"]``).
+
+ :returns: A canonicalized hostname if one was found; otherwise the
+ original ``hostname`` (or `CouldNotCanonicalize` is raised, if local
+ fallback is disabled).
+
+ .. versionadded:: 2.7
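+
+ For example (a sketch of the call signature only; ``config`` is an
+ already-parsed `SSHConfig` and the host and domain are hypothetical)::
+
+ options = config.lookup("myhost")
+ fqdn = config.canonicalize("myhost", options, ["example.com"])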
+ """
+ found = False
+ for domain in domains:
+ candidate = "{}.{}".format(hostname, domain)
+ family_specific = _addressfamily_host_lookup(candidate, options)
+ if family_specific is not None:
+ # TODO: would we want to dig deeper into other results? e.g. to
+ # find something that satisfies PermittedCNAMEs when that is
+ # implemented?
+ found = family_specific[0]
+ else:
+ # TODO: what does ssh use here and is there a reason to use
+ # that instead of gethostbyname?
+ try:
+ found = socket.gethostbyname(candidate)
+ except socket.gaierror:
+ pass
+ if found:
+ # TODO: follow CNAME (implied by found != candidate?) if
+ # CanonicalizePermittedCNAMEs allows it
+ return candidate
+ # If we got here, it means canonicalization failed.
+ # When CanonicalizeFallbackLocal is undefined or 'yes', we just spit
+ # back the original hostname.
+ if options.get("canonicalizefallbacklocal", "yes") == "yes":
+ return hostname
+ # And here, we failed AND fallback was set to a non-yes value, so we
+ # need to get mad.
+ raise CouldNotCanonicalize(hostname)
+
+ def get_hostnames(self):
+ """
+ Return the set of literal hostnames defined in the SSH config (both
+ explicit hostnames and wildcard entries).
+ """
+ hosts = set()
+ for entry in self._config:
+ hosts.update(entry["host"])
+ return hosts
+
+ def _pattern_matches(self, patterns, target):
+ # Convenience auto-splitter if not already a list
+ if hasattr(patterns, "split"):
+ patterns = patterns.split(",")
+ match = False
+ for pattern in patterns:
+ # Short-circuit if target matches a negated pattern
+ if pattern.startswith("!") and fnmatch.fnmatch(
+ target, pattern[1:]
+ ):
+ return False
+ # Flag a match, but continue (in case of later negation) if regular
+ # match occurs
+ elif fnmatch.fnmatch(target, pattern):
+ match = True
+ return match
+
+ def _does_match(
+ self, match_list, target_hostname, canonical, final, options
+ ):
+ matched = []
+ candidates = match_list[:]
+ local_username = getpass.getuser()
+ while candidates:
+ candidate = candidates.pop(0)
+ passed = None
+ # Obtain latest host/user value every loop, so later Match may
+ # reference values assigned within a prior Match.
+ configured_host = options.get("hostname", None)
+ configured_user = options.get("user", None)
+ type_, param = candidate["type"], candidate["param"]
+ # Canonical is a hard pass/fail based on whether this is a
+ # canonicalized re-lookup.
+ if type_ == "canonical":
+ if self._should_fail(canonical, candidate):
+ return False
+ if type_ == "final":
+ passed = final
+ # The parse step ensures we only see this by itself or after
+ # canonical, so it's also an easy hard pass. (No negation here as
+ # that would be uh, pretty weird?)
+ elif type_ == "all":
+ return True
+ # From here, we are testing various non-hard criteria,
+ # short-circuiting only on fail
+ elif type_ == "host":
+ hostval = configured_host or target_hostname
+ passed = self._pattern_matches(param, hostval)
+ elif type_ == "originalhost":
+ passed = self._pattern_matches(param, target_hostname)
+ elif type_ == "user":
+ user = configured_user or local_username
+ passed = self._pattern_matches(param, user)
+ elif type_ == "localuser":
+ passed = self._pattern_matches(param, local_username)
+ elif type_ == "exec":
+ exec_cmd = self._tokenize(
+ options, target_hostname, "match-exec", param
+ )
+ # This is the laziest spot in which we can get mad about an
+ # inability to import Invoke.
+ if invoke is None:
+ raise invoke_import_error
+ # Like OpenSSH, we 'redirect' stdout but let stderr bubble up
+ passed = invoke.run(exec_cmd, hide="stdout", warn=True).ok
+ # Tackle any 'passed, but was negated' results from above
+ if passed is not None and self._should_fail(passed, candidate):
+ return False
+ # Made it all the way here? Everything matched!
+ matched.append(candidate)
+ # Did anything match? (To be treated as bool, usually.)
+ return matched
+
+ def _should_fail(self, would_pass, candidate):
+ return would_pass if candidate["negate"] else not would_pass
+
+ def _tokenize(self, config, target_hostname, key, value):
+ """
+ Tokenize a string based on current config/hostname data.
+
+ :param config: Current config data.
+ :param target_hostname: Original target connection hostname.
+ :param key: Config key being tokenized (used to filter token list).
+ :param value: Config value being tokenized.
+
+ :returns: The tokenized version of the input ``value`` string.
+ """
+ allowed_tokens = self._allowed_tokens(key)
+ # Short-circuit if no tokenization possible
+ if not allowed_tokens:
+ return value
+ # Obtain potentially configured hostname, for use with %h.
+ # Special-case where we are tokenizing the hostname itself, to avoid
+ # replacing %h with a %h-bearing value, etc.
+ configured_hostname = target_hostname
+ if key != "hostname":
+ configured_hostname = config.get("hostname", configured_hostname)
+ # Ditto the rest of the source values
+ if "port" in config:
+ port = config["port"]
+ else:
+ port = SSH_PORT
+ user = getpass.getuser()
+ if "user" in config:
+ remoteuser = config["user"]
+ else:
+ remoteuser = user
+ local_hostname = socket.gethostname().split(".")[0]
+ local_fqdn = LazyFqdn(config, local_hostname)
+ homedir = os.path.expanduser("~")
+ tohash = local_hostname + target_hostname + repr(port) + remoteuser
+ # The actual tokens!
+ replacements = {
+ # TODO: %%???
+ "%C": sha1(tohash.encode()).hexdigest(),
+ "%d": homedir,
+ "%h": configured_hostname,
+ # TODO: %i?
+ "%L": local_hostname,
+ "%l": local_fqdn,
+ # also this is pseudo buggy when not in Match exec mode so document
+ # that. also WHY is that the case?? don't we do all of this late?
+ "%n": target_hostname,
+ "%p": port,
+ "%r": remoteuser,
+ # TODO: %T? don't believe this is possible however
+ "%u": user,
+ "~": homedir,
+ }
+ # Do the thing with the stuff
+ tokenized = value
+ for find, replace in replacements.items():
+ if find not in allowed_tokens:
+ continue
+ tokenized = tokenized.replace(find, str(replace))
+ # TODO: log? eg that value -> tokenized
+ return tokenized
+
+ def _allowed_tokens(self, key):
+ """
+ Given config ``key``, return list of token strings to tokenize.
+
+ .. note::
+ This feels like it wants to eventually go away, but is used to
+ preserve as-strict-as-possible compatibility with OpenSSH, which
+ for whatever reason only applies some tokens to some config keys.
+ """
+ return self.TOKENS_BY_CONFIG_KEY.get(key, [])
+
+ def _expand_variables(self, config, target_hostname):
+ """
+ Return a dict of config options with expanded substitutions
+ for a given original & current target hostname.
+
+ Please refer to :doc:`/api/config` for details.
+
+ :param dict config: the currently parsed config
+ :param str target_hostname: the hostname whose config is being looked up
+ """
+ for k in config:
+ if config[k] is None:
+ continue
+ tokenizer = partial(self._tokenize, config, target_hostname, k)
+ if isinstance(config[k], list):
+ for i, value in enumerate(config[k]):
+ config[k][i] = tokenizer(value)
+ else:
+ config[k] = tokenizer(config[k])
+ return config
+
+ def _get_hosts(self, host):
+ """
+ Return a list of host names from the given host value.
+ """
+ try:
+ return shlex.split(host)
+ except ValueError:
+ raise ConfigParseError("Unparsable host {}".format(host))
+
+ def _get_matches(self, match):
+ """
+ Parse a specific Match config line into a list-of-dicts for its values.
+
+ Performs some parse-time validation as well.
+ """
+ matches = []
+ tokens = shlex.split(match)
+ while tokens:
+ match = {"type": None, "param": None, "negate": False}
+ type_ = tokens.pop(0)
+ # Handle per-keyword negation
+ if type_.startswith("!"):
+ match["negate"] = True
+ type_ = type_[1:]
+ match["type"] = type_
+ # all/canonical have no params (everything else does)
+ if type_ in ("all", "canonical", "final"):
+ matches.append(match)
+ continue
+ if not tokens:
+ raise ConfigParseError(
+ "Missing parameter to Match '{}' keyword".format(type_)
+ )
+ match["param"] = tokens.pop(0)
+ matches.append(match)
+ # Perform some (easier to do now than in the middle) validation that is
+ # better handled here than at lookup time.
+ keywords = [x["type"] for x in matches]
+ if "all" in keywords:
+ allowable = ("all", "canonical")
+ ok, bad = (
+ list(filter(lambda x: x in allowable, keywords)),
+ list(filter(lambda x: x not in allowable, keywords)),
+ )
+ err = None
+ if any(bad):
+ err = "Match does not allow 'all' mixed with anything but 'canonical'" # noqa
+ elif "canonical" in ok and ok.index("canonical") > ok.index("all"):
+ err = "Match does not allow 'all' before 'canonical'"
+ if err is not None:
+ raise ConfigParseError(err)
+ return matches
+
+
+def _addressfamily_host_lookup(hostname, options):
+ """
+ Try looking up ``hostname`` in an IPv4 or IPv6 specific manner.
+
+ This is an odd duck because it is needed by two divergent use cases. It looks
+ up ``AddressFamily`` in ``options`` and if it is ``inet`` or ``inet6``,
+ this function uses `socket.getaddrinfo` to perform a family-specific
+ lookup, returning the result if successful.
+
+ In any other situation -- lookup failure, or ``AddressFamily`` being
+ unspecified or ``any`` -- ``None`` is returned instead and the caller is
+ expected to do something situation-appropriate like calling
+ `socket.gethostbyname`.
+
+ :param str hostname: Hostname to look up.
+ :param options: `SSHConfigDict` instance w/ parsed options.
+ :returns: ``getaddrinfo``-style tuples, or ``None``, depending on the situation.
+ """
+ address_family = options.get("addressfamily", "any").lower()
+ if address_family == "any":
+ return
+ try:
+ family = socket.AF_INET6
+ if address_family == "inet":
+ family = socket.AF_INET
+ return socket.getaddrinfo(
+ hostname,
+ None,
+ family,
+ socket.SOCK_DGRAM,
+ socket.IPPROTO_IP,
+ socket.AI_CANONNAME,
+ )
+ except socket.gaierror:
+ pass
+
+
+class LazyFqdn:
+ """
+ Returns the host's FQDN, on request, as a string.
+ """
+
+ def __init__(self, config, host=None):
+ self.fqdn = None
+ self.config = config
+ self.host = host
+
+ def __str__(self):
+ if self.fqdn is None:
+ #
+ # If the SSH config contains AddressFamily, use that when
+ # determining the local host's FQDN. Using socket.getfqdn() from
+ # the standard library is the most general solution, but can
+ # result in noticeable delays on some platforms when IPv6 is
+ # misconfigured or not available, as it calls getaddrinfo with no
+ # address family specified, so both IPv4 and IPv6 are checked.
+ #
+
+ # Handle specific option
+ fqdn = None
+ results = _addressfamily_host_lookup(self.host, self.config)
+ if results is not None:
+ for res in results:
+ af, socktype, proto, canonname, sa = res
+ if canonname and "." in canonname:
+ fqdn = canonname
+ break
+ # Handle 'any' / unspecified / lookup failure
+ if fqdn is None:
+ fqdn = socket.getfqdn()
+ # Cache
+ self.fqdn = fqdn
+ return self.fqdn
+
+
+class SSHConfigDict(dict):
+ """
+ A dictionary wrapper/subclass for per-host configuration structures.
+
+ This class introduces some usage niceties for consumers of `SSHConfig`,
+ specifically around the issue of variable type conversions: normal value
+ access yields strings, but there are now methods such as `as_bool` and
+ `as_int` that yield casted values instead.
+
+ For example, given the following ``ssh_config`` file snippet::
+
+ Host foo.example.com
+ PasswordAuthentication no
+ Compression yes
+ ServerAliveInterval 60
+
+ the following code highlights how you can access both the raw strings and
+ usefully type-cast Python versions (recalling that keys are all
+ normalized to lowercase first)::
+
+ my_config = SSHConfig()
+ my_config.parse(open('~/.ssh/config'))
+ conf = my_config.lookup('foo.example.com')
+
+ assert conf['passwordauthentication'] == 'no'
+ assert conf.as_bool('passwordauthentication') is False
+ assert conf['compression'] == 'yes'
+ assert conf.as_bool('compression') is True
+ assert conf['serveraliveinterval'] == '60'
+ assert conf.as_int('serveraliveinterval') == 60
+
+ .. versionadded:: 2.5
+ """
+
+ def as_bool(self, key):
+ """
+ Express given key's value as a boolean type.
+
+ Typically, this is used for ``ssh_config``'s pseudo-boolean values
+ which are either ``"yes"`` or ``"no"``. In such cases, ``"yes"`` yields
+ ``True`` and any other value becomes ``False``.
+
+ .. note::
+ If (for whatever reason) the stored value is already boolean in
+ nature, it's simply returned.
+
+ .. versionadded:: 2.5
+ """
+ val = self[key]
+ if isinstance(val, bool):
+ return val
+ return val.lower() == "yes"
+
+ def as_int(self, key):
+ """
+ Express given key's value as an integer, if possible.
+
+ This method will raise ``ValueError`` or similar if the value is not
+ int-appropriate, same as the builtin `int` type.
+
+ .. versionadded:: 2.5
+ """
+ return int(self[key])
diff --git a/paramiko/dsskey.py b/paramiko/dsskey.py
new file mode 100644
index 0000000..5215d28
--- /dev/null
+++ b/paramiko/dsskey.py
@@ -0,0 +1,258 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+DSS keys.
+"""
+
+from cryptography.exceptions import InvalidSignature
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes, serialization
+from cryptography.hazmat.primitives.asymmetric import dsa
+from cryptography.hazmat.primitives.asymmetric.utils import (
+ decode_dss_signature,
+ encode_dss_signature,
+)
+
+from paramiko import util
+from paramiko.common import zero_byte
+from paramiko.ssh_exception import SSHException
+from paramiko.message import Message
+from paramiko.ber import BER, BERException
+from paramiko.pkey import PKey
+
+
+class DSSKey(PKey):
+ """
+ Representation of a DSS key which can be used to sign and verify SSH2
+ data.
+ """
+
+ name = "ssh-dss"
+
+ def __init__(
+ self,
+ msg=None,
+ data=None,
+ filename=None,
+ password=None,
+ vals=None,
+ file_obj=None,
+ ):
+ self.p = None
+ self.q = None
+ self.g = None
+ self.y = None
+ self.x = None
+ self.public_blob = None
+ if file_obj is not None:
+ self._from_private_key(file_obj, password)
+ return
+ if filename is not None:
+ self._from_private_key_file(filename, password)
+ return
+ if (msg is None) and (data is not None):
+ msg = Message(data)
+ if vals is not None:
+ self.p, self.q, self.g, self.y = vals
+ else:
+ self._check_type_and_load_cert(
+ msg=msg,
+ key_type=self.name,
+ cert_type=f"{self.name}-cert-v01@openssh.com",
+ )
+ self.p = msg.get_mpint()
+ self.q = msg.get_mpint()
+ self.g = msg.get_mpint()
+ self.y = msg.get_mpint()
+ self.size = util.bit_length(self.p)
+
+ def asbytes(self):
+ m = Message()
+ m.add_string(self.name)
+ m.add_mpint(self.p)
+ m.add_mpint(self.q)
+ m.add_mpint(self.g)
+ m.add_mpint(self.y)
+ return m.asbytes()
+
+ def __str__(self):
+ return self.asbytes()
+
+ @property
+ def _fields(self):
+ return (self.get_name(), self.p, self.q, self.g, self.y)
+
+ # TODO 4.0: remove
+ def get_name(self):
+ return self.name
+
+ def get_bits(self):
+ return self.size
+
+ def can_sign(self):
+ return self.x is not None
+
+ def sign_ssh_data(self, data, algorithm=None):
+ key = dsa.DSAPrivateNumbers(
+ x=self.x,
+ public_numbers=dsa.DSAPublicNumbers(
+ y=self.y,
+ parameter_numbers=dsa.DSAParameterNumbers(
+ p=self.p, q=self.q, g=self.g
+ ),
+ ),
+ ).private_key(backend=default_backend())
+ sig = key.sign(data, hashes.SHA1())
+ r, s = decode_dss_signature(sig)
+
+ m = Message()
+ m.add_string(self.name)
+ # apparently, in rare cases, r or s may be shorter than 20 bytes!
+ rstr = util.deflate_long(r, 0)
+ sstr = util.deflate_long(s, 0)
+ if len(rstr) < 20:
+ rstr = zero_byte * (20 - len(rstr)) + rstr
+ if len(sstr) < 20:
+ sstr = zero_byte * (20 - len(sstr)) + sstr
+ m.add_string(rstr + sstr)
+ return m
+
+ def verify_ssh_sig(self, data, msg):
+ if len(msg.asbytes()) == 40:
+ # spies.com bug: signature has no header
+ sig = msg.asbytes()
+ else:
+ kind = msg.get_text()
+ if kind != self.name:
+ return 0
+ sig = msg.get_binary()
+
+ # pull out (r, s) which are NOT encoded as mpints
+ sigR = util.inflate_long(sig[:20], 1)
+ sigS = util.inflate_long(sig[20:], 1)
+
+ signature = encode_dss_signature(sigR, sigS)
+
+ key = dsa.DSAPublicNumbers(
+ y=self.y,
+ parameter_numbers=dsa.DSAParameterNumbers(
+ p=self.p, q=self.q, g=self.g
+ ),
+ ).public_key(backend=default_backend())
+ try:
+ key.verify(signature, data, hashes.SHA1())
+ except InvalidSignature:
+ return False
+ else:
+ return True
+
+ def write_private_key_file(self, filename, password=None):
+ key = dsa.DSAPrivateNumbers(
+ x=self.x,
+ public_numbers=dsa.DSAPublicNumbers(
+ y=self.y,
+ parameter_numbers=dsa.DSAParameterNumbers(
+ p=self.p, q=self.q, g=self.g
+ ),
+ ),
+ ).private_key(backend=default_backend())
+
+ self._write_private_key_file(
+ filename,
+ key,
+ serialization.PrivateFormat.TraditionalOpenSSL,
+ password=password,
+ )
+
+ def write_private_key(self, file_obj, password=None):
+ key = dsa.DSAPrivateNumbers(
+ x=self.x,
+ public_numbers=dsa.DSAPublicNumbers(
+ y=self.y,
+ parameter_numbers=dsa.DSAParameterNumbers(
+ p=self.p, q=self.q, g=self.g
+ ),
+ ),
+ ).private_key(backend=default_backend())
+
+ self._write_private_key(
+ file_obj,
+ key,
+ serialization.PrivateFormat.TraditionalOpenSSL,
+ password=password,
+ )
+
+ @staticmethod
+ def generate(bits=1024, progress_func=None):
+ """
+ Generate a new private DSS key. This factory function can be used to
+ generate a new host key or authentication key.
+
+ :param int bits: number of bits the generated key should be.
+ :param progress_func: Unused
+ :return: new `.DSSKey` private key
+ """
+ numbers = dsa.generate_private_key(
+ bits, backend=default_backend()
+ ).private_numbers()
+ key = DSSKey(
+ vals=(
+ numbers.public_numbers.parameter_numbers.p,
+ numbers.public_numbers.parameter_numbers.q,
+ numbers.public_numbers.parameter_numbers.g,
+ numbers.public_numbers.y,
+ )
+ )
+ key.x = numbers.x
+ return key
+
+ # ...internals...
+
+ def _from_private_key_file(self, filename, password):
+ data = self._read_private_key_file("DSA", filename, password)
+ self._decode_key(data)
+
+ def _from_private_key(self, file_obj, password):
+ data = self._read_private_key("DSA", file_obj, password)
+ self._decode_key(data)
+
+ def _decode_key(self, data):
+ pkformat, data = data
+ # private key file contains:
+ # DSAPrivateKey = { version = 0, p, q, g, y, x }
+ if pkformat == self._PRIVATE_KEY_FORMAT_ORIGINAL:
+ try:
+ keylist = BER(data).decode()
+ except BERException as e:
+ raise SSHException("Unable to parse key file: {}".format(e))
+ elif pkformat == self._PRIVATE_KEY_FORMAT_OPENSSH:
+ keylist = self._uint32_cstruct_unpack(data, "iiiii")
+ keylist = [0] + list(keylist)
+ else:
+ self._got_bad_key_format_id(pkformat)
+ if type(keylist) is not list or len(keylist) < 6 or keylist[0] != 0:
+ raise SSHException(
+ "not a valid DSA private key file (bad ber encoding)"
+ )
+ self.p = keylist[1]
+ self.q = keylist[2]
+ self.g = keylist[3]
+ self.y = keylist[4]
+ self.x = keylist[5]
+ self.size = util.bit_length(self.p)
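
For readers following the signing code above: the ssh-dss wire signature is simply r and s packed as two fixed 20-byte big-endian integers and concatenated (hence the zero-padding in sign_ssh_data and the 40-byte check in verify_ssh_sig). A minimal standalone sketch of that packing, using only the standard library and invented helper names:

    # Pack and unpack an ssh-dss signature blob the way DSSKey does above:
    # r and s are each left-padded to exactly 20 bytes and concatenated.

    def pack_dss_sig(r, s):
        return r.to_bytes(20, "big") + s.to_bytes(20, "big")

    def unpack_dss_sig(blob):
        assert len(blob) == 40, "ssh-dss signature blobs are always 40 bytes"
        return int.from_bytes(blob[:20], "big"), int.from_bytes(blob[20:], "big")

    r, s = 1234567890, 42  # placeholder values, far smaller than real signatures
    blob = pack_dss_sig(r, s)
    assert unpack_dss_sig(blob) == (r, s)
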
diff --git a/paramiko/ecdsakey.py b/paramiko/ecdsakey.py
new file mode 100644
index 0000000..6fd95fa
--- /dev/null
+++ b/paramiko/ecdsakey.py
@@ -0,0 +1,339 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ECDSA keys
+"""
+
+from cryptography.exceptions import InvalidSignature, UnsupportedAlgorithm
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes, serialization
+from cryptography.hazmat.primitives.asymmetric import ec
+from cryptography.hazmat.primitives.asymmetric.utils import (
+ decode_dss_signature,
+ encode_dss_signature,
+)
+
+from paramiko.common import four_byte
+from paramiko.message import Message
+from paramiko.pkey import PKey
+from paramiko.ssh_exception import SSHException
+from paramiko.util import deflate_long
+
+
+class _ECDSACurve:
+ """
+ Represents a specific ECDSA Curve (nistp256, nistp384, etc).
+
+ Handles the generation of the key format identifier and the selection of
+ the proper hash function. Also grabs the proper curve class from the
+ 'cryptography' package.
+ """
+
+ def __init__(self, curve_class, nist_name):
+ self.nist_name = nist_name
+ self.key_length = curve_class.key_size
+
+ # Defined in RFC 5656 6.2
+ self.key_format_identifier = "ecdsa-sha2-" + self.nist_name
+
+ # Defined in RFC 5656 6.2.1
+ if self.key_length <= 256:
+ self.hash_object = hashes.SHA256
+ elif self.key_length <= 384:
+ self.hash_object = hashes.SHA384
+ else:
+ self.hash_object = hashes.SHA512
+
+ self.curve_class = curve_class
+
+
+class _ECDSACurveSet:
+ """
+ A collection to hold the ECDSA curves. Allows querying by curve class, by
+ key format identifier, and by key length, which are the ways in which
+ ECDSAKey needs to be able to look up curves.
+ """
+
+ def __init__(self, ecdsa_curves):
+ self.ecdsa_curves = ecdsa_curves
+
+ def get_key_format_identifier_list(self):
+ return [curve.key_format_identifier for curve in self.ecdsa_curves]
+
+ def get_by_curve_class(self, curve_class):
+ for curve in self.ecdsa_curves:
+ if curve.curve_class == curve_class:
+ return curve
+
+ def get_by_key_format_identifier(self, key_format_identifier):
+ for curve in self.ecdsa_curves:
+ if curve.key_format_identifier == key_format_identifier:
+ return curve
+
+ def get_by_key_length(self, key_length):
+ for curve in self.ecdsa_curves:
+ if curve.key_length == key_length:
+ return curve
+
+
+class ECDSAKey(PKey):
+ """
+ Representation of an ECDSA key which can be used to sign and verify SSH2
+ data.
+ """
+
+ _ECDSA_CURVES = _ECDSACurveSet(
+ [
+ _ECDSACurve(ec.SECP256R1, "nistp256"),
+ _ECDSACurve(ec.SECP384R1, "nistp384"),
+ _ECDSACurve(ec.SECP521R1, "nistp521"),
+ ]
+ )
+
+ def __init__(
+ self,
+ msg=None,
+ data=None,
+ filename=None,
+ password=None,
+ vals=None,
+ file_obj=None,
+ # TODO 4.0: remove; it does nothing since porting to cryptography.io
+ validate_point=True,
+ ):
+ self.verifying_key = None
+ self.signing_key = None
+ self.public_blob = None
+ if file_obj is not None:
+ self._from_private_key(file_obj, password)
+ return
+ if filename is not None:
+ self._from_private_key_file(filename, password)
+ return
+ if (msg is None) and (data is not None):
+ msg = Message(data)
+ if vals is not None:
+ self.signing_key, self.verifying_key = vals
+ c_class = self.signing_key.curve.__class__
+ self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(c_class)
+ else:
+ # Must set ecdsa_curve first; subroutines called herein may need to
+ # spit out our get_name(), which relies on this.
+ key_type = msg.get_text()
+ # But this also means we need to hand it a real key/curve
+ # identifier, so strip out any cert business. (NOTE: could push
+ # that into _ECDSACurveSet.get_by_key_format_identifier(), but it
+ # feels more correct to do it here?)
+ suffix = "-cert-v01@openssh.com"
+ if key_type.endswith(suffix):
+ key_type = key_type[: -len(suffix)]
+ self.ecdsa_curve = self._ECDSA_CURVES.get_by_key_format_identifier(
+ key_type
+ )
+ key_types = self._ECDSA_CURVES.get_key_format_identifier_list()
+ cert_types = [
+ "{}-cert-v01@openssh.com".format(x) for x in key_types
+ ]
+ self._check_type_and_load_cert(
+ msg=msg, key_type=key_types, cert_type=cert_types
+ )
+ curvename = msg.get_text()
+ if curvename != self.ecdsa_curve.nist_name:
+ raise SSHException(
+ "Can't handle curve of type {}".format(curvename)
+ )
+
+ pointinfo = msg.get_binary()
+ try:
+ key = ec.EllipticCurvePublicKey.from_encoded_point(
+ self.ecdsa_curve.curve_class(), pointinfo
+ )
+ self.verifying_key = key
+ except ValueError:
+ raise SSHException("Invalid public key")
+
+ @classmethod
+ def identifiers(cls):
+ return cls._ECDSA_CURVES.get_key_format_identifier_list()
+
+ # TODO 4.0: deprecate/remove
+ @classmethod
+ def supported_key_format_identifiers(cls):
+ return cls.identifiers()
+
+ def asbytes(self):
+ key = self.verifying_key
+ m = Message()
+ m.add_string(self.ecdsa_curve.key_format_identifier)
+ m.add_string(self.ecdsa_curve.nist_name)
+
+ numbers = key.public_numbers()
+
+ key_size_bytes = (key.curve.key_size + 7) // 8
+
+ x_bytes = deflate_long(numbers.x, add_sign_padding=False)
+ x_bytes = b"\x00" * (key_size_bytes - len(x_bytes)) + x_bytes
+
+ y_bytes = deflate_long(numbers.y, add_sign_padding=False)
+ y_bytes = b"\x00" * (key_size_bytes - len(y_bytes)) + y_bytes
+
+ point_str = four_byte + x_bytes + y_bytes
+ m.add_string(point_str)
+ return m.asbytes()
+
+ def __str__(self):
+ return self.asbytes()
+
+ @property
+ def _fields(self):
+ return (
+ self.get_name(),
+ self.verifying_key.public_numbers().x,
+ self.verifying_key.public_numbers().y,
+ )
+
+ def get_name(self):
+ return self.ecdsa_curve.key_format_identifier
+
+ def get_bits(self):
+ return self.ecdsa_curve.key_length
+
+ def can_sign(self):
+ return self.signing_key is not None
+
+ def sign_ssh_data(self, data, algorithm=None):
+ ecdsa = ec.ECDSA(self.ecdsa_curve.hash_object())
+ sig = self.signing_key.sign(data, ecdsa)
+ r, s = decode_dss_signature(sig)
+
+ m = Message()
+ m.add_string(self.ecdsa_curve.key_format_identifier)
+ m.add_string(self._sigencode(r, s))
+ return m
+
+ def verify_ssh_sig(self, data, msg):
+ if msg.get_text() != self.ecdsa_curve.key_format_identifier:
+ return False
+ sig = msg.get_binary()
+ sigR, sigS = self._sigdecode(sig)
+ signature = encode_dss_signature(sigR, sigS)
+
+ try:
+ self.verifying_key.verify(
+ signature, data, ec.ECDSA(self.ecdsa_curve.hash_object())
+ )
+ except InvalidSignature:
+ return False
+ else:
+ return True
+
+ def write_private_key_file(self, filename, password=None):
+ self._write_private_key_file(
+ filename,
+ self.signing_key,
+ serialization.PrivateFormat.TraditionalOpenSSL,
+ password=password,
+ )
+
+ def write_private_key(self, file_obj, password=None):
+ self._write_private_key(
+ file_obj,
+ self.signing_key,
+ serialization.PrivateFormat.TraditionalOpenSSL,
+ password=password,
+ )
+
+ @classmethod
+ def generate(cls, curve=ec.SECP256R1(), progress_func=None, bits=None):
+ """
+ Generate a new private ECDSA key. This factory function can be used to
+ generate a new host key or authentication key.
+
+ :param progress_func: Not used for this type of key.
+ :returns: A new private key (`.ECDSAKey`) object
+ """
+ if bits is not None:
+ curve = cls._ECDSA_CURVES.get_by_key_length(bits)
+ if curve is None:
+ raise ValueError("Unsupported key length: {:d}".format(bits))
+ curve = curve.curve_class()
+
+ private_key = ec.generate_private_key(curve, backend=default_backend())
+ return ECDSAKey(vals=(private_key, private_key.public_key()))
+
+ # ...internals...
+
+ def _from_private_key_file(self, filename, password):
+ data = self._read_private_key_file("EC", filename, password)
+ self._decode_key(data)
+
+ def _from_private_key(self, file_obj, password):
+ data = self._read_private_key("EC", file_obj, password)
+ self._decode_key(data)
+
+ def _decode_key(self, data):
+ pkformat, data = data
+ if pkformat == self._PRIVATE_KEY_FORMAT_ORIGINAL:
+ try:
+ key = serialization.load_der_private_key(
+ data, password=None, backend=default_backend()
+ )
+ except (
+ ValueError,
+ AssertionError,
+ TypeError,
+ UnsupportedAlgorithm,
+ ) as e:
+ raise SSHException(str(e))
+ elif pkformat == self._PRIVATE_KEY_FORMAT_OPENSSH:
+ try:
+ msg = Message(data)
+ curve_name = msg.get_text()
+ verkey = msg.get_binary() # noqa: F841
+ sigkey = msg.get_mpint()
+ name = "ecdsa-sha2-" + curve_name
+ curve = self._ECDSA_CURVES.get_by_key_format_identifier(name)
+ if not curve:
+ raise SSHException("Invalid key curve identifier")
+ key = ec.derive_private_key(
+ sigkey, curve.curve_class(), default_backend()
+ )
+ except Exception as e:
+ # PKey._read_private_key_openssh() should check or return
+ # keytype - parsing could fail for any reason due to wrong type
+ raise SSHException(str(e))
+ else:
+ self._got_bad_key_format_id(pkformat)
+
+ self.signing_key = key
+ self.verifying_key = key.public_key()
+ curve_class = key.curve.__class__
+ self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(curve_class)
+
+ def _sigencode(self, r, s):
+ msg = Message()
+ msg.add_mpint(r)
+ msg.add_mpint(s)
+ return msg.asbytes()
+
+ def _sigdecode(self, sig):
+ msg = Message(sig)
+ r = msg.get_mpint()
+ s = msg.get_mpint()
+ return r, s
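
The _sigencode/_sigdecode pair above simply wraps r and s as SSH mpints, while the 'cryptography' layer works with DER-encoded signatures. A small sketch of that round trip with hand-rolled mpint helpers (the helper names and the test payload are invented for the example; zero is not special-cased):

    import struct

    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import ec
    from cryptography.hazmat.primitives.asymmetric.utils import (
        decode_dss_signature,
        encode_dss_signature,
    )

    def _mpint(n):
        # SSH mpint: 4-byte length, then big-endian bytes with a leading zero
        # byte whenever the high bit is set.
        data = n.to_bytes((n.bit_length() + 8) // 8, "big")
        return struct.pack(">I", len(data)) + data

    def der_to_ssh(der_sig):
        r, s = decode_dss_signature(der_sig)
        return _mpint(r) + _mpint(s)

    def ssh_to_der(blob):
        def read_mpint(off):
            (length,) = struct.unpack_from(">I", blob, off)
            end = off + 4 + length
            return int.from_bytes(blob[off + 4:end], "big"), end
        r, off = read_mpint(0)
        s, _ = read_mpint(off)
        return encode_dss_signature(r, s)

    key = ec.generate_private_key(ec.SECP256R1())
    der = key.sign(b"payload", ec.ECDSA(hashes.SHA256()))
    ssh_blob = der_to_ssh(der)
    # Rebuilding the DER form from the SSH blob verifies cleanly.
    key.public_key().verify(
        ssh_to_der(ssh_blob), b"payload", ec.ECDSA(hashes.SHA256())
    )
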
diff --git a/paramiko/ed25519key.py b/paramiko/ed25519key.py
new file mode 100644
index 0000000..e5e81ac
--- /dev/null
+++ b/paramiko/ed25519key.py
@@ -0,0 +1,212 @@
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import bcrypt
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.ciphers import Cipher
+
+import nacl.signing
+
+from paramiko.message import Message
+from paramiko.pkey import PKey, OPENSSH_AUTH_MAGIC, _unpad_openssh
+from paramiko.util import b
+from paramiko.ssh_exception import SSHException, PasswordRequiredException
+
+
+class Ed25519Key(PKey):
+ """
+ Representation of an `Ed25519 <https://ed25519.cr.yp.to/>`_ key.
+
+ .. note::
+ Ed25519 key support was added to OpenSSH in version 6.5.
+
+ .. versionadded:: 2.2
+ .. versionchanged:: 2.3
+ Added a ``file_obj`` parameter to match other key classes.
+ """
+
+ name = "ssh-ed25519"
+
+ def __init__(
+ self, msg=None, data=None, filename=None, password=None, file_obj=None
+ ):
+ self.public_blob = None
+ verifying_key = signing_key = None
+ if msg is None and data is not None:
+ msg = Message(data)
+ if msg is not None:
+ self._check_type_and_load_cert(
+ msg=msg,
+ key_type=self.name,
+ cert_type="ssh-ed25519-cert-v01@openssh.com",
+ )
+ verifying_key = nacl.signing.VerifyKey(msg.get_binary())
+ elif filename is not None:
+ with open(filename, "r") as f:
+ pkformat, data = self._read_private_key("OPENSSH", f)
+ elif file_obj is not None:
+ pkformat, data = self._read_private_key("OPENSSH", file_obj)
+
+ if filename or file_obj:
+ signing_key = self._parse_signing_key_data(data, password)
+
+ if signing_key is None and verifying_key is None:
+ raise ValueError("need a key")
+
+ self._signing_key = signing_key
+ self._verifying_key = verifying_key
+
+ def _parse_signing_key_data(self, data, password):
+ from paramiko.transport import Transport
+
+ # We may eventually want this to be usable for other key types, as
+ # OpenSSH moves to it, but for now this is just for Ed25519 keys.
+ # This format is described here:
+ # https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
+ # The description isn't totally complete, and I had to refer to the
+ # source for a full implementation.
+ message = Message(data)
+ if message.get_bytes(len(OPENSSH_AUTH_MAGIC)) != OPENSSH_AUTH_MAGIC:
+ raise SSHException("Invalid key")
+
+ ciphername = message.get_text()
+ kdfname = message.get_text()
+ kdfoptions = message.get_binary()
+ num_keys = message.get_int()
+
+ if kdfname == "none":
+ # kdfname of "none" must have an empty kdfoptions, the ciphername
+ # must be "none"
+ if kdfoptions or ciphername != "none":
+ raise SSHException("Invalid key")
+ elif kdfname == "bcrypt":
+ if not password:
+ raise PasswordRequiredException(
+ "Private key file is encrypted"
+ )
+ kdf = Message(kdfoptions)
+ bcrypt_salt = kdf.get_binary()
+ bcrypt_rounds = kdf.get_int()
+ else:
+ raise SSHException("Invalid key")
+
+ if ciphername != "none" and ciphername not in Transport._cipher_info:
+ raise SSHException("Invalid key")
+
+ public_keys = []
+ for _ in range(num_keys):
+ pubkey = Message(message.get_binary())
+ if pubkey.get_text() != self.name:
+ raise SSHException("Invalid key")
+ public_keys.append(pubkey.get_binary())
+
+ private_ciphertext = message.get_binary()
+ if ciphername == "none":
+ private_data = private_ciphertext
+ else:
+ cipher = Transport._cipher_info[ciphername]
+ key = bcrypt.kdf(
+ password=b(password),
+ salt=bcrypt_salt,
+ desired_key_bytes=cipher["key-size"] + cipher["block-size"],
+ rounds=bcrypt_rounds,
+ # We can't control how many rounds are on disk, so no sense
+ # warning about it.
+ ignore_few_rounds=True,
+ )
+ decryptor = Cipher(
+ cipher["class"](key[: cipher["key-size"]]),
+ cipher["mode"](key[cipher["key-size"] :]),
+ backend=default_backend(),
+ ).decryptor()
+ private_data = (
+ decryptor.update(private_ciphertext) + decryptor.finalize()
+ )
+
+ message = Message(_unpad_openssh(private_data))
+ if message.get_int() != message.get_int():
+ raise SSHException("Invalid key")
+
+ signing_keys = []
+ for i in range(num_keys):
+ if message.get_text() != self.name:
+ raise SSHException("Invalid key")
+ # A copy of the public key, again, ignore.
+ public = message.get_binary()
+ key_data = message.get_binary()
+ # The second half of the key data is yet another copy of the public
+ # key...
+ signing_key = nacl.signing.SigningKey(key_data[:32])
+ # Verify that all the public keys are the same...
+ assert (
+ signing_key.verify_key.encode()
+ == public
+ == public_keys[i]
+ == key_data[32:]
+ )
+ signing_keys.append(signing_key)
+ # Comment, ignore.
+ message.get_binary()
+
+ if len(signing_keys) != 1:
+ raise SSHException("Invalid key")
+ return signing_keys[0]
+
+ def asbytes(self):
+ if self.can_sign():
+ v = self._signing_key.verify_key
+ else:
+ v = self._verifying_key
+ m = Message()
+ m.add_string(self.name)
+ m.add_string(v.encode())
+ return m.asbytes()
+
+ @property
+ def _fields(self):
+ if self.can_sign():
+ v = self._signing_key.verify_key
+ else:
+ v = self._verifying_key
+ return (self.get_name(), v)
+
+ # TODO 4.0: remove
+ def get_name(self):
+ return self.name
+
+ def get_bits(self):
+ return 256
+
+ def can_sign(self):
+ return self._signing_key is not None
+
+ def sign_ssh_data(self, data, algorithm=None):
+ m = Message()
+ m.add_string(self.name)
+ m.add_string(self._signing_key.sign(data).signature)
+ return m
+
+ def verify_ssh_sig(self, data, msg):
+ if msg.get_text() != self.name:
+ return False
+
+ try:
+ self._verifying_key.verify(data, msg.get_binary())
+ except nacl.exceptions.BadSignatureError:
+ return False
+ else:
+ return True
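
The bcrypt branch of _parse_signing_key_data derives key-size plus block-size bytes and splits them into the cipher key and the IV. A hedged sketch of that step in isolation, assuming aes256-ctr sizes (32-byte key, 16-byte block) and made-up salt/rounds/ciphertext rather than values read from a real kdfoptions blob:

    import os

    import bcrypt  # same PyPI "bcrypt" package the loader above relies on
    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

    password = b"correct horse battery staple"   # placeholder passphrase
    salt = os.urandom(16)                        # really read from kdfoptions
    rounds = 16                                  # likewise stored in kdfoptions
    key_size, block_size = 32, 16                # aes256-ctr sizes (assumed)

    material = bcrypt.kdf(
        password=password,
        salt=salt,
        desired_key_bytes=key_size + block_size,
        rounds=rounds,
        ignore_few_rounds=True,
    )
    decryptor = Cipher(
        algorithms.AES(material[:key_size]),   # first chunk: the cipher key
        modes.CTR(material[key_size:]),        # remaining bytes: the IV/counter
    ).decryptor()
    plaintext = decryptor.update(b"\x00" * 32) + decryptor.finalize()
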
diff --git a/paramiko/file.py b/paramiko/file.py
new file mode 100644
index 0000000..a36abb9
--- /dev/null
+++ b/paramiko/file.py
@@ -0,0 +1,528 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+from io import BytesIO
+
+from paramiko.common import (
+ linefeed_byte_value,
+ crlf,
+ cr_byte,
+ linefeed_byte,
+ cr_byte_value,
+)
+
+from paramiko.util import ClosingContextManager, u
+
+
+class BufferedFile(ClosingContextManager):
+ """
+ Reusable base class to implement Python-style file buffering around a
+ simpler stream.
+ """
+
+ _DEFAULT_BUFSIZE = 8192
+
+ SEEK_SET = 0
+ SEEK_CUR = 1
+ SEEK_END = 2
+
+ FLAG_READ = 0x1
+ FLAG_WRITE = 0x2
+ FLAG_APPEND = 0x4
+ FLAG_BINARY = 0x10
+ FLAG_BUFFERED = 0x20
+ FLAG_LINE_BUFFERED = 0x40
+ FLAG_UNIVERSAL_NEWLINE = 0x80
+
+ def __init__(self):
+ self.newlines = None
+ self._flags = 0
+ self._bufsize = self._DEFAULT_BUFSIZE
+ self._wbuffer = BytesIO()
+ self._rbuffer = bytes()
+ self._at_trailing_cr = False
+ self._closed = False
+ # pos - position within the file, according to the user
+ # realpos - position according to the OS
+ # (these may be different because we buffer for line reading)
+ self._pos = self._realpos = 0
+ # size only matters for seekable files
+ self._size = 0
+
+ def __del__(self):
+ self.close()
+
+ def __iter__(self):
+ """
+ Returns an iterator that can be used to iterate over the lines in this
+ file. This iterator happens to return the file itself, since a file is
+ its own iterator.
+
+ :raises: ``ValueError`` -- if the file is closed.
+ """
+ if self._closed:
+ raise ValueError("I/O operation on closed file")
+ return self
+
+ def close(self):
+ """
+ Close the file. Future read and write operations will fail.
+ """
+ self.flush()
+ self._closed = True
+
+ def flush(self):
+ """
+ Write out any data in the write buffer. This may do nothing if write
+ buffering is not turned on.
+ """
+ self._write_all(self._wbuffer.getvalue())
+ self._wbuffer = BytesIO()
+ return
+
+ def __next__(self):
+ """
+ Returns the next line from the input, or raises ``StopIteration``
+ when EOF is hit. Unlike python file objects, it's okay to mix
+ calls to `.next` and `.readline`.
+
+ :raises: ``StopIteration`` -- when the end of the file is reached.
+
+ :returns:
+ a line (`str`, or `bytes` if the file was opened in binary mode)
+ read from the file.
+ """
+ line = self.readline()
+ if not line:
+ raise StopIteration
+ return line
+
+ def readable(self):
+ """
+ Check if the file can be read from.
+
+ :returns:
+ `True` if the file can be read from. If `False`, `read` will raise
+ an exception.
+ """
+ return (self._flags & self.FLAG_READ) == self.FLAG_READ
+
+ def writable(self):
+ """
+ Check if the file can be written to.
+
+ :returns:
+ `True` if the file can be written to. If `False`, `write` will
+ raise an exception.
+ """
+ return (self._flags & self.FLAG_WRITE) == self.FLAG_WRITE
+
+ def seekable(self):
+ """
+ Check if the file supports random access.
+
+ :returns:
+ `True` if the file supports random access. If `False`, `seek` will
+ raise an exception.
+ """
+ return False
+
+ def readinto(self, buff):
+ """
+ Read up to ``len(buff)`` bytes into ``bytearray`` *buff* and return the
+ number of bytes read.
+
+ :returns:
+ The number of bytes read.
+ """
+ data = self.read(len(buff))
+ buff[: len(data)] = data
+ return len(data)
+
+ def read(self, size=None):
+ """
+ Read at most ``size`` bytes from the file (less if we hit the end of
+ the file first). If the ``size`` argument is negative or omitted,
+ read all the remaining data in the file.
+
+ .. note::
+ ``'b'`` mode flag is ignored (``self.FLAG_BINARY`` in
+ ``self._flags``), because SSH treats all files as binary, since we
+ have no idea what encoding the file is in, or even if the file is
+ text data.
+
+ :param int size: maximum number of bytes to read
+ :returns:
+ data read from the file (as bytes), or an empty string if EOF was
+ encountered immediately
+ """
+ if self._closed:
+ raise IOError("File is closed")
+ if not (self._flags & self.FLAG_READ):
+ raise IOError("File is not open for reading")
+ if (size is None) or (size < 0):
+ # go for broke
+ result = bytearray(self._rbuffer)
+ self._rbuffer = bytes()
+ self._pos += len(result)
+ while True:
+ try:
+ new_data = self._read(self._DEFAULT_BUFSIZE)
+ except EOFError:
+ new_data = None
+ if (new_data is None) or (len(new_data) == 0):
+ break
+ result.extend(new_data)
+ self._realpos += len(new_data)
+ self._pos += len(new_data)
+ return bytes(result)
+ if size <= len(self._rbuffer):
+ result = self._rbuffer[:size]
+ self._rbuffer = self._rbuffer[size:]
+ self._pos += len(result)
+ return result
+ while len(self._rbuffer) < size:
+ read_size = size - len(self._rbuffer)
+ if self._flags & self.FLAG_BUFFERED:
+ read_size = max(self._bufsize, read_size)
+ try:
+ new_data = self._read(read_size)
+ except EOFError:
+ new_data = None
+ if (new_data is None) or (len(new_data) == 0):
+ break
+ self._rbuffer += new_data
+ self._realpos += len(new_data)
+ result = self._rbuffer[:size]
+ self._rbuffer = self._rbuffer[size:]
+ self._pos += len(result)
+ return result
+
+ def readline(self, size=None):
+ """
+ Read one entire line from the file. A trailing newline character is
+ kept in the string (but may be absent when a file ends with an
+ incomplete line). If the size argument is present and non-negative, it
+ is a maximum byte count (including the trailing newline) and an
+ incomplete line may be returned. An empty string is returned only when
+ EOF is encountered immediately.
+
+ .. note::
+ Unlike stdio's ``fgets``, the returned string contains null
+ characters (``'\\0'``) if they occurred in the input.
+
+ :param int size: maximum length of returned string.
+ :returns:
+ next line of the file, or an empty string if the end of the
+ file has been reached.
+
+ If the file was opened in binary (``'b'``) mode: bytes are returned
+ Else: the encoding of the file is assumed to be UTF-8 and character
+ strings (`str`) are returned
+ """
+ # it's almost silly how complex this function is.
+ if self._closed:
+ raise IOError("File is closed")
+ if not (self._flags & self.FLAG_READ):
+ raise IOError("File not open for reading")
+ line = self._rbuffer
+ truncated = False
+ while True:
+ if (
+ self._at_trailing_cr
+ and self._flags & self.FLAG_UNIVERSAL_NEWLINE
+ and len(line) > 0
+ ):
+ # edge case: the newline may be '\r\n' and we may have read
+ # only the first '\r' last time.
+ if line[0] == linefeed_byte_value:
+ line = line[1:]
+ self._record_newline(crlf)
+ else:
+ self._record_newline(cr_byte)
+ self._at_trailing_cr = False
+ # check size before looking for a linefeed, in case we already have
+ # enough.
+ if (size is not None) and (size >= 0):
+ if len(line) >= size:
+ # truncate line
+ self._rbuffer = line[size:]
+ line = line[:size]
+ truncated = True
+ break
+ n = size - len(line)
+ else:
+ n = self._bufsize
+ if linefeed_byte in line or (
+ self._flags & self.FLAG_UNIVERSAL_NEWLINE and cr_byte in line
+ ):
+ break
+ try:
+ new_data = self._read(n)
+ except EOFError:
+ new_data = None
+ if (new_data is None) or (len(new_data) == 0):
+ self._rbuffer = bytes()
+ self._pos += len(line)
+ return line if self._flags & self.FLAG_BINARY else u(line)
+ line += new_data
+ self._realpos += len(new_data)
+ # find the newline
+ pos = line.find(linefeed_byte)
+ if self._flags & self.FLAG_UNIVERSAL_NEWLINE:
+ rpos = line.find(cr_byte)
+ if (rpos >= 0) and (rpos < pos or pos < 0):
+ pos = rpos
+ if pos == -1:
+ # we couldn't find a newline in the truncated string, return it
+ self._pos += len(line)
+ return line if self._flags & self.FLAG_BINARY else u(line)
+ xpos = pos + 1
+ if (
+ line[pos] == cr_byte_value
+ and xpos < len(line)
+ and line[xpos] == linefeed_byte_value
+ ):
+ xpos += 1
+ # if the string was truncated, _rbuffer needs to have the string after
+ # the newline character plus the truncated part of the line we stored
+ # earlier in _rbuffer
+ if truncated:
+ self._rbuffer = line[xpos:] + self._rbuffer
+ else:
+ self._rbuffer = line[xpos:]
+
+ lf = line[pos:xpos]
+ line = line[:pos] + linefeed_byte
+ if (len(self._rbuffer) == 0) and (lf == cr_byte):
+ # we could read the line up to a '\r' and there could still be a
+ # '\n' following that we read next time. note that and eat it.
+ self._at_trailing_cr = True
+ else:
+ self._record_newline(lf)
+ self._pos += len(line)
+ return line if self._flags & self.FLAG_BINARY else u(line)
+
+ def readlines(self, sizehint=None):
+ """
+ Read all remaining lines using `readline` and return them as a list.
+ If the optional ``sizehint`` argument is present, instead of reading up
+ to EOF, whole lines totalling approximately sizehint bytes (possibly
+ after rounding up to an internal buffer size) are read.
+
+ :param int sizehint: desired maximum number of bytes to read.
+ :returns: list of lines read from the file.
+ """
+ lines = []
+ byte_count = 0
+ while True:
+ line = self.readline()
+ if len(line) == 0:
+ break
+ lines.append(line)
+ byte_count += len(line)
+ if (sizehint is not None) and (byte_count >= sizehint):
+ break
+ return lines
+
+ def seek(self, offset, whence=0):
+ """
+ Set the file's current position, like stdio's ``fseek``. Not all file
+ objects support seeking.
+
+ .. note::
+ If a file is opened in append mode (``'a'`` or ``'a+'``), any seek
+ operations will be undone at the next write (as the file position
+ will move back to the end of the file).
+
+ :param int offset:
+ position to move to within the file, relative to ``whence``.
+ :param int whence:
+ type of movement: 0 = absolute; 1 = relative to the current
+ position; 2 = relative to the end of the file.
+
+ :raises: ``IOError`` -- if the file doesn't support random access.
+ """
+ raise IOError("File does not support seeking.")
+
+ def tell(self):
+ """
+ Return the file's current position. This may not be accurate or
+ useful if the underlying file doesn't support random access, or was
+ opened in append mode.
+
+ :returns: file position (`number <int>` of bytes).
+ """
+ return self._pos
+
+ def write(self, data):
+ """
+ Write data to the file. If write buffering is on (``bufsize`` was
+ specified and non-zero), some or all of the data may not actually be
+ written yet. (Use `flush` or `close` to force buffered data to be
+ written out.)
+
+ :param data: ``str``/``bytes`` data to write
+ """
+ if isinstance(data, str):
+ # Accept text and encode as utf-8 for compatibility only.
+ data = data.encode("utf-8")
+ if self._closed:
+ raise IOError("File is closed")
+ if not (self._flags & self.FLAG_WRITE):
+ raise IOError("File not open for writing")
+ if not (self._flags & self.FLAG_BUFFERED):
+ self._write_all(data)
+ return
+ self._wbuffer.write(data)
+ if self._flags & self.FLAG_LINE_BUFFERED:
+ # only scan the new data for linefeed, to avoid wasting time.
+ last_newline_pos = data.rfind(linefeed_byte)
+ if last_newline_pos >= 0:
+ wbuf = self._wbuffer.getvalue()
+ last_newline_pos += len(wbuf) - len(data)
+ self._write_all(wbuf[: last_newline_pos + 1])
+ self._wbuffer = BytesIO()
+ self._wbuffer.write(wbuf[last_newline_pos + 1 :])
+ return
+ # even if we're line buffering, if the buffer has grown past the
+ # buffer size, force a flush.
+ if self._wbuffer.tell() >= self._bufsize:
+ self.flush()
+ return
+
+ def writelines(self, sequence):
+ """
+ Write a sequence of strings to the file. The sequence can be any
+ iterable object producing strings, typically a list of strings. (The
+ name is intended to match `readlines`; `writelines` does not add line
+ separators.)
+
+ :param sequence: an iterable sequence of strings.
+ """
+ for line in sequence:
+ self.write(line)
+ return
+
+ def xreadlines(self):
+ """
+ Identical to ``iter(f)``. This is a deprecated file interface that
+ predates Python iterator support.
+ """
+ return self
+
+ @property
+ def closed(self):
+ return self._closed
+
+ # ...overrides...
+
+ def _read(self, size):
+ """
+ (subclass override)
+ Read data from the stream. Return ``None`` or raise ``EOFError`` to
+ indicate EOF.
+ """
+ raise EOFError()
+
+ def _write(self, data):
+ """
+ (subclass override)
+ Write data into the stream.
+ """
+ raise IOError("write not implemented")
+
+ def _get_size(self):
+ """
+ (subclass override)
+ Return the size of the file. This is called from within `_set_mode`
+ if the file is opened in append mode, so the file position can be
+ tracked and `seek` and `tell` will work correctly. If the file is
+ a stream that can't be randomly accessed, you don't need to override
+ this method.
+ """
+ return 0
+
+ # ...internals...
+
+ def _set_mode(self, mode="r", bufsize=-1):
+ """
+ Subclasses call this method to initialize the BufferedFile.
+ """
+ # set bufsize in any event, because it's used for readline().
+ self._bufsize = self._DEFAULT_BUFSIZE
+ if bufsize < 0:
+ # do no buffering by default, because otherwise writes will get
+ # buffered in a way that will probably confuse people.
+ bufsize = 0
+ if bufsize == 1:
+ # apparently, line buffering only affects writes. reads are only
+ # buffered if you call readline (directly or indirectly: iterating
+ # over a file will indirectly call readline).
+ self._flags |= self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED
+ elif bufsize > 1:
+ self._bufsize = bufsize
+ self._flags |= self.FLAG_BUFFERED
+ self._flags &= ~self.FLAG_LINE_BUFFERED
+ elif bufsize == 0:
+ # unbuffered
+ self._flags &= ~(self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED)
+
+ if ("r" in mode) or ("+" in mode):
+ self._flags |= self.FLAG_READ
+ if ("w" in mode) or ("+" in mode):
+ self._flags |= self.FLAG_WRITE
+ if "a" in mode:
+ self._flags |= self.FLAG_WRITE | self.FLAG_APPEND
+ self._size = self._get_size()
+ self._pos = self._realpos = self._size
+ if "b" in mode:
+ self._flags |= self.FLAG_BINARY
+ if "U" in mode:
+ self._flags |= self.FLAG_UNIVERSAL_NEWLINE
+ # built-in file objects have this attribute to store which kinds of
+ # line terminations they've seen:
+ # <http://www.python.org/doc/current/lib/built-in-funcs.html>
+ self.newlines = None
+
+ def _write_all(self, raw_data):
+ # the underlying stream may be something that does partial writes (like
+ # a socket).
+ data = memoryview(raw_data)
+ while len(data) > 0:
+ count = self._write(data)
+ data = data[count:]
+ if self._flags & self.FLAG_APPEND:
+ self._size += count
+ self._pos = self._realpos = self._size
+ else:
+ self._pos += count
+ self._realpos += count
+ return None
+
+ def _record_newline(self, newline):
+ # silliness about tracking what kinds of newlines we've seen.
+ # i don't understand why it can be None, a string, or a tuple, instead
+ # of just always being a tuple, but we'll emulate that behavior anyway.
+ if not (self._flags & self.FLAG_UNIVERSAL_NEWLINE):
+ return
+ if self.newlines is None:
+ self.newlines = newline
+ elif self.newlines != newline and isinstance(self.newlines, bytes):
+ self.newlines = (self.newlines, newline)
+ elif newline not in self.newlines:
+ self.newlines += (newline,)
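
BufferedFile leaves _read, _write, and _get_size to subclasses (as the SFTP and channel file objects do). A toy in-memory subclass, shown only to illustrate the override contract and the readline behaviour documented above; the class and attribute names here are invented for the example:

    from io import BytesIO

    from paramiko.file import BufferedFile

    class MemoryFile(BufferedFile):
        """Toy subclass backed by an in-memory byte stream."""

        def __init__(self, initial=b"", mode="r", bufsize=-1):
            super().__init__()
            self._backing = BytesIO(initial)
            self._set_mode(mode, bufsize)   # sets FLAG_READ / FLAG_WRITE etc.

        def _read(self, size):
            data = self._backing.read(size)
            return data if data else None   # None (or EOFError) signals EOF

        def _write(self, data):
            return self._backing.write(data)

    f = MemoryFile(b"one\ntwo\nthree\n")
    assert f.readline() == "one\n"
    assert f.readlines() == ["two\n", "three\n"]
    f.close()
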
diff --git a/paramiko/hostkeys.py b/paramiko/hostkeys.py
new file mode 100644
index 0000000..4d47e95
--- /dev/null
+++ b/paramiko/hostkeys.py
@@ -0,0 +1,384 @@
+# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+from base64 import encodebytes, decodebytes
+import binascii
+import os
+import re
+
+from collections.abc import MutableMapping
+from hashlib import sha1
+from hmac import HMAC
+
+
+from paramiko.pkey import PKey, UnknownKeyType
+from paramiko.util import get_logger, constant_time_bytes_eq, b, u
+from paramiko.ssh_exception import SSHException
+
+
+class HostKeys(MutableMapping):
+ """
+ Representation of an OpenSSH-style "known hosts" file. Host keys can be
+ read from one or more files, and then individual hosts can be looked up to
+ verify server keys during SSH negotiation.
+
+ A `.HostKeys` object can be treated like a dict; any dict lookup is
+ equivalent to calling `lookup`.
+
+ .. versionadded:: 1.5.3
+ """
+
+ def __init__(self, filename=None):
+ """
+ Create a new HostKeys object, optionally loading keys from an OpenSSH
+ style host-key file.
+
+ :param str filename: filename to load host keys from, or ``None``
+ """
+ # emulate a dict of { hostname: { keytype: PKey } }
+ self._entries = []
+ if filename is not None:
+ self.load(filename)
+
+ def add(self, hostname, keytype, key):
+ """
+ Add a host key entry to the table. Any existing entry for a
+ ``(hostname, keytype)`` pair will be replaced.
+
+ :param str hostname: the hostname (or IP) to add
+ :param str keytype: key type (e.g. ``"ssh-rsa"`` or ``"ssh-ed25519"``)
+ :param .PKey key: the key to add
+ """
+ for e in self._entries:
+ if (hostname in e.hostnames) and (e.key.get_name() == keytype):
+ e.key = key
+ return
+ self._entries.append(HostKeyEntry([hostname], key))
+
+ def load(self, filename):
+ """
+ Read a file of known SSH host keys, in the format used by OpenSSH.
+ This type of file unfortunately doesn't exist on Windows, but on
+ posix, it will usually be stored in
+ ``os.path.expanduser("~/.ssh/known_hosts")``.
+
+ If this method is called multiple times, the host keys are merged,
+ not cleared. So multiple calls to `load` will just call `add`,
+ replacing any existing entries and adding new ones.
+
+ :param str filename: name of the file to read host keys from
+
+ :raises: ``IOError`` -- if there was an error reading the file
+ """
+ with open(filename, "r") as f:
+ for lineno, line in enumerate(f, 1):
+ line = line.strip()
+ if (len(line) == 0) or (line[0] == "#"):
+ continue
+ try:
+ entry = HostKeyEntry.from_line(line, lineno)
+ except SSHException:
+ continue
+ if entry is not None:
+ _hostnames = entry.hostnames
+ for h in _hostnames:
+ if self.check(h, entry.key):
+ entry.hostnames.remove(h)
+ if len(entry.hostnames):
+ self._entries.append(entry)
+
+ def save(self, filename):
+ """
+ Save host keys into a file, in the format used by OpenSSH. The order
+ of keys in the file will be preserved when possible (if these keys were
+ loaded from a file originally). The single exception is that combined
+ lines will be split into individual key lines, which is arguably a bug.
+
+ :param str filename: name of the file to write
+
+ :raises: ``IOError`` -- if there was an error writing the file
+
+ .. versionadded:: 1.6.1
+ """
+ with open(filename, "w") as f:
+ for e in self._entries:
+ line = e.to_line()
+ if line:
+ f.write(line)
+
+ def lookup(self, hostname):
+ """
+ Find a hostkey entry for a given hostname or IP. If no entry is found,
+ ``None`` is returned. Otherwise a dictionary of keytype to key is
+ returned. The keytype is a key format identifier such as ``"ssh-rsa"``.
+
+ :param str hostname: the hostname (or IP) to lookup
+ :return: dict of `str` -> `.PKey` keys associated with this host
+ (or ``None``)
+ """
+
+ class SubDict(MutableMapping):
+ def __init__(self, hostname, entries, hostkeys):
+ self._hostname = hostname
+ self._entries = entries
+ self._hostkeys = hostkeys
+
+ def __iter__(self):
+ for k in self.keys():
+ yield k
+
+ def __len__(self):
+ return len(self.keys())
+
+ def __delitem__(self, key):
+ for e in list(self._entries):
+ if e.key.get_name() == key:
+ self._entries.remove(e)
+ break
+ else:
+ raise KeyError(key)
+
+ def __getitem__(self, key):
+ for e in self._entries:
+ if e.key.get_name() == key:
+ return e.key
+ raise KeyError(key)
+
+ def __setitem__(self, key, val):
+ for e in self._entries:
+ if e.key is None:
+ continue
+ if e.key.get_name() == key:
+ # replace
+ e.key = val
+ break
+ else:
+ # add a new one
+ e = HostKeyEntry([hostname], val)
+ self._entries.append(e)
+ self._hostkeys._entries.append(e)
+
+ def keys(self):
+ return [
+ e.key.get_name()
+ for e in self._entries
+ if e.key is not None
+ ]
+
+ entries = []
+ for e in self._entries:
+ if self._hostname_matches(hostname, e):
+ entries.append(e)
+ if len(entries) == 0:
+ return None
+ return SubDict(hostname, entries, self)
+
+ def _hostname_matches(self, hostname, entry):
+ """
+ Tests whether the ``hostname`` string matches the given `.HostKeyEntry`.
+
+ :returns: ``True`` if the hostname matches the entry, else ``False``.
+ """
+ for h in entry.hostnames:
+ if (
+ h == hostname
+ or h.startswith("|1|")
+ and not hostname.startswith("|1|")
+ and constant_time_bytes_eq(self.hash_host(hostname, h), h)
+ ):
+ return True
+ return False
+
+ def check(self, hostname, key):
+ """
+ Return True if the given key is associated with the given hostname
+ in this dictionary.
+
+ :param str hostname: hostname (or IP) of the SSH server
+ :param .PKey key: the key to check
+ :return:
+ ``True`` if the key is associated with the hostname; else ``False``
+ """
+ k = self.lookup(hostname)
+ if k is None:
+ return False
+ host_key = k.get(key.get_name(), None)
+ if host_key is None:
+ return False
+ return host_key.asbytes() == key.asbytes()
+
+ def clear(self):
+ """
+ Remove all host keys from the dictionary.
+ """
+ self._entries = []
+
+ def __iter__(self):
+ for k in self.keys():
+ yield k
+
+ def __len__(self):
+ return len(self.keys())
+
+ def __getitem__(self, key):
+ ret = self.lookup(key)
+ if ret is None:
+ raise KeyError(key)
+ return ret
+
+ def __delitem__(self, key):
+ index = None
+ for i, entry in enumerate(self._entries):
+ if self._hostname_matches(key, entry):
+ index = i
+ break
+ if index is None:
+ raise KeyError(key)
+ self._entries.pop(index)
+
+ def __setitem__(self, hostname, entry):
+ # don't use this please.
+ if len(entry) == 0:
+ self._entries.append(HostKeyEntry([hostname], None))
+ return
+ for key_type in entry.keys():
+ found = False
+ for e in self._entries:
+ if (hostname in e.hostnames) and e.key.get_name() == key_type:
+ # replace
+ e.key = entry[key_type]
+ found = True
+ if not found:
+ self._entries.append(HostKeyEntry([hostname], entry[key_type]))
+
+ def keys(self):
+ ret = []
+ for e in self._entries:
+ for h in e.hostnames:
+ if h not in ret:
+ ret.append(h)
+ return ret
+
+ def values(self):
+ ret = []
+ for k in self.keys():
+ ret.append(self.lookup(k))
+ return ret
+
+ @staticmethod
+ def hash_host(hostname, salt=None):
+ """
+ Return a "hashed" form of the hostname, as used by OpenSSH when storing
+ hashed hostnames in the known_hosts file.
+
+ :param str hostname: the hostname to hash
+ :param str salt: optional salt to use when hashing
+ (must be 20 bytes long)
+ :return: the hashed hostname as a `str`
+ """
+ if salt is None:
+ salt = os.urandom(sha1().digest_size)
+ else:
+ if salt.startswith("|1|"):
+ salt = salt.split("|")[2]
+ salt = decodebytes(b(salt))
+ assert len(salt) == sha1().digest_size
+ hmac = HMAC(salt, b(hostname), sha1).digest()
+ hostkey = "|1|{}|{}".format(u(encodebytes(salt)), u(encodebytes(hmac)))
+ return hostkey.replace("\n", "")
+
+
+class InvalidHostKey(Exception):
+ def __init__(self, line, exc):
+ self.line = line
+ self.exc = exc
+ self.args = (line, exc)
+
+
+class HostKeyEntry:
+ """
+ Representation of a line in an OpenSSH-style "known hosts" file.
+ """
+
+ def __init__(self, hostnames=None, key=None):
+ self.valid = (hostnames is not None) and (key is not None)
+ self.hostnames = hostnames
+ self.key = key
+
+ @classmethod
+ def from_line(cls, line, lineno=None):
+ """
+ Parses the given line of text to find the names for the host,
+ the type of key, and the key data. The line is expected to be in the
+ format used by the OpenSSH known_hosts file. Fields are separated by a
+ single space or tab.
+
+ Lines are expected to not have leading or trailing whitespace.
+ We don't bother to check for comments or empty lines. All of
+ that should be taken care of before sending the line to us.
+
+ :param str line: a line from an OpenSSH known_hosts file
+ """
+ log = get_logger("paramiko.hostkeys")
+ fields = re.split(" |\t", line)
+ if len(fields) < 3:
+ # Bad number of fields
+ msg = "Not enough fields found in known_hosts in line {} ({!r})"
+ log.info(msg.format(lineno, line))
+ return None
+ fields = fields[:3]
+
+ names, key_type, key = fields
+ names = names.split(",")
+
+ # Decide what kind of key we're looking at and create an object
+ # to hold it accordingly.
+ try:
+ # TODO: this grew organically and doesn't seem /wrong/ per se (file
+ # read -> unicode str -> bytes for base64 decode -> decoded bytes);
+ # but in Python 3 forever land, can we simply use
+ # `base64.b64decode(str-from-file)` here?
+ key_bytes = decodebytes(b(key))
+ except binascii.Error as e:
+ raise InvalidHostKey(line, e)
+
+ try:
+ return cls(names, PKey.from_type_string(key_type, key_bytes))
+ except UnknownKeyType:
+ # TODO 4.0: consider changing HostKeys API so this just raises
+ # naturally and the exception is muted higher up in the stack?
+ log.info("Unable to handle key of type {}".format(key_type))
+ return None
+
+ def to_line(self):
+ """
+ Returns a string in OpenSSH known_hosts file format, or None if
+ the object is not in a valid state. A trailing newline is
+ included.
+ """
+ if self.valid:
+ return "{} {} {}\n".format(
+ ",".join(self.hostnames),
+ self.key.get_name(),
+ self.key.get_base64(),
+ )
+ return None
+
+ def __repr__(self):
+ return "<HostKeyEntry {!r}: {!r}>".format(self.hostnames, self.key)
diff --git a/paramiko/kex_curve25519.py b/paramiko/kex_curve25519.py
new file mode 100644
index 0000000..20c23e4
--- /dev/null
+++ b/paramiko/kex_curve25519.py
@@ -0,0 +1,131 @@
+import binascii
+import hashlib
+
+from cryptography.exceptions import UnsupportedAlgorithm
+from cryptography.hazmat.primitives import constant_time, serialization
+from cryptography.hazmat.primitives.asymmetric.x25519 import (
+ X25519PrivateKey,
+ X25519PublicKey,
+)
+
+from paramiko.message import Message
+from paramiko.common import byte_chr
+from paramiko.ssh_exception import SSHException
+
+
+_MSG_KEXECDH_INIT, _MSG_KEXECDH_REPLY = range(30, 32)
+c_MSG_KEXECDH_INIT, c_MSG_KEXECDH_REPLY = [byte_chr(c) for c in range(30, 32)]
+
+
+class KexCurve25519:
+ hash_algo = hashlib.sha256
+
+ def __init__(self, transport):
+ self.transport = transport
+ self.key = None
+
+ @classmethod
+ def is_available(cls):
+ try:
+ X25519PrivateKey.generate()
+ except UnsupportedAlgorithm:
+ return False
+ else:
+ return True
+
+ def _perform_exchange(self, peer_key):
+ secret = self.key.exchange(peer_key)
+ if constant_time.bytes_eq(secret, b"\x00" * 32):
+ raise SSHException(
+ "peer's curve25519 public value has wrong order"
+ )
+ return secret
+
+ def start_kex(self):
+ self.key = X25519PrivateKey.generate()
+ if self.transport.server_mode:
+ self.transport._expect_packet(_MSG_KEXECDH_INIT)
+ return
+
+ m = Message()
+ m.add_byte(c_MSG_KEXECDH_INIT)
+ m.add_string(
+ self.key.public_key().public_bytes(
+ serialization.Encoding.Raw, serialization.PublicFormat.Raw
+ )
+ )
+ self.transport._send_message(m)
+ self.transport._expect_packet(_MSG_KEXECDH_REPLY)
+
+ def parse_next(self, ptype, m):
+ if self.transport.server_mode and (ptype == _MSG_KEXECDH_INIT):
+ return self._parse_kexecdh_init(m)
+ elif not self.transport.server_mode and (ptype == _MSG_KEXECDH_REPLY):
+ return self._parse_kexecdh_reply(m)
+ raise SSHException(
+ "KexCurve25519 asked to handle packet type {:d}".format(ptype)
+ )
+
+ def _parse_kexecdh_init(self, m):
+ peer_key_bytes = m.get_string()
+ peer_key = X25519PublicKey.from_public_bytes(peer_key_bytes)
+ K = self._perform_exchange(peer_key)
+ K = int(binascii.hexlify(K), 16)
+ # compute exchange hash
+ hm = Message()
+ hm.add(
+ self.transport.remote_version,
+ self.transport.local_version,
+ self.transport.remote_kex_init,
+ self.transport.local_kex_init,
+ )
+ server_key_bytes = self.transport.get_server_key().asbytes()
+ exchange_key_bytes = self.key.public_key().public_bytes(
+ serialization.Encoding.Raw, serialization.PublicFormat.Raw
+ )
+ hm.add_string(server_key_bytes)
+ hm.add_string(peer_key_bytes)
+ hm.add_string(exchange_key_bytes)
+ hm.add_mpint(K)
+ H = self.hash_algo(hm.asbytes()).digest()
+ self.transport._set_K_H(K, H)
+ sig = self.transport.get_server_key().sign_ssh_data(
+ H, self.transport.host_key_type
+ )
+ # construct reply
+ m = Message()
+ m.add_byte(c_MSG_KEXECDH_REPLY)
+ m.add_string(server_key_bytes)
+ m.add_string(exchange_key_bytes)
+ m.add_string(sig)
+ self.transport._send_message(m)
+ self.transport._activate_outbound()
+
+ def _parse_kexecdh_reply(self, m):
+ peer_host_key_bytes = m.get_string()
+ peer_key_bytes = m.get_string()
+ sig = m.get_binary()
+
+ peer_key = X25519PublicKey.from_public_bytes(peer_key_bytes)
+
+ K = self._perform_exchange(peer_key)
+ K = int(binascii.hexlify(K), 16)
+ # compute exchange hash and verify signature
+ hm = Message()
+ hm.add(
+ self.transport.local_version,
+ self.transport.remote_version,
+ self.transport.local_kex_init,
+ self.transport.remote_kex_init,
+ )
+ hm.add_string(peer_host_key_bytes)
+ hm.add_string(
+ self.key.public_key().public_bytes(
+ serialization.Encoding.Raw, serialization.PublicFormat.Raw
+ )
+ )
+ hm.add_string(peer_key_bytes)
+ hm.add_mpint(K)
+ self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
+ self.transport._verify_key(peer_host_key_bytes, sig)
+ self.transport._activate_outbound()
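
Stripped of the transport plumbing, the exchange in _perform_exchange is a plain X25519 agreement plus a constant-time check that the shared secret is not all zeros. A local two-party sketch using the same 'cryptography' primitives:

    from cryptography.hazmat.primitives import constant_time, serialization
    from cryptography.hazmat.primitives.asymmetric.x25519 import (
        X25519PrivateKey,
        X25519PublicKey,
    )

    ours = X25519PrivateKey.generate()
    theirs = X25519PrivateKey.generate()

    # What actually travels in KEXECDH_INIT/REPLY: the 32-byte raw public value.
    their_public_bytes = theirs.public_key().public_bytes(
        serialization.Encoding.Raw, serialization.PublicFormat.Raw
    )

    shared = ours.exchange(X25519PublicKey.from_public_bytes(their_public_bytes))
    if constant_time.bytes_eq(shared, b"\x00" * 32):
        raise ValueError("peer's curve25519 public value has wrong order")

    # Both sides arrive at the same secret, which the kex then hashes into K.
    assert shared == theirs.exchange(ours.public_key())
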
diff --git a/paramiko/kex_ecdh_nist.py b/paramiko/kex_ecdh_nist.py
new file mode 100644
index 0000000..41fab46
--- /dev/null
+++ b/paramiko/kex_ecdh_nist.py
@@ -0,0 +1,151 @@
+"""
+Ephemeral Elliptic Curve Diffie-Hellman (ECDH) key exchange
+RFC 5656, Section 4
+"""
+
+from hashlib import sha256, sha384, sha512
+from paramiko.common import byte_chr
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.asymmetric import ec
+from cryptography.hazmat.primitives import serialization
+from binascii import hexlify
+
+_MSG_KEXECDH_INIT, _MSG_KEXECDH_REPLY = range(30, 32)
+c_MSG_KEXECDH_INIT, c_MSG_KEXECDH_REPLY = [byte_chr(c) for c in range(30, 32)]
+
+
+class KexNistp256:
+
+ name = "ecdh-sha2-nistp256"
+ hash_algo = sha256
+ curve = ec.SECP256R1()
+
+ def __init__(self, transport):
+ self.transport = transport
+ # private key, client public and server public keys
+ self.P = 0
+ self.Q_C = None
+ self.Q_S = None
+
+ def start_kex(self):
+ self._generate_key_pair()
+ if self.transport.server_mode:
+ self.transport._expect_packet(_MSG_KEXECDH_INIT)
+ return
+ m = Message()
+ m.add_byte(c_MSG_KEXECDH_INIT)
+ # SEC1: V2.0 2.3.3 Elliptic-Curve-Point-to-Octet-String Conversion
+ m.add_string(
+ self.Q_C.public_bytes(
+ serialization.Encoding.X962,
+ serialization.PublicFormat.UncompressedPoint,
+ )
+ )
+ self.transport._send_message(m)
+ self.transport._expect_packet(_MSG_KEXECDH_REPLY)
+
+ def parse_next(self, ptype, m):
+ if self.transport.server_mode and (ptype == _MSG_KEXECDH_INIT):
+ return self._parse_kexecdh_init(m)
+ elif not self.transport.server_mode and (ptype == _MSG_KEXECDH_REPLY):
+ return self._parse_kexecdh_reply(m)
+ raise SSHException(
+ "KexECDH asked to handle packet type {:d}".format(ptype)
+ )
+
+ def _generate_key_pair(self):
+ self.P = ec.generate_private_key(self.curve, default_backend())
+ if self.transport.server_mode:
+ self.Q_S = self.P.public_key()
+ return
+ self.Q_C = self.P.public_key()
+
+ def _parse_kexecdh_init(self, m):
+ Q_C_bytes = m.get_string()
+ self.Q_C = ec.EllipticCurvePublicKey.from_encoded_point(
+ self.curve, Q_C_bytes
+ )
+ K_S = self.transport.get_server_key().asbytes()
+ K = self.P.exchange(ec.ECDH(), self.Q_C)
+ K = int(hexlify(K), 16)
+ # compute exchange hash
+ hm = Message()
+ hm.add(
+ self.transport.remote_version,
+ self.transport.local_version,
+ self.transport.remote_kex_init,
+ self.transport.local_kex_init,
+ )
+ hm.add_string(K_S)
+ hm.add_string(Q_C_bytes)
+ # SEC1: V2.0 2.3.3 Elliptic-Curve-Point-to-Octet-String Conversion
+ hm.add_string(
+ self.Q_S.public_bytes(
+ serialization.Encoding.X962,
+ serialization.PublicFormat.UncompressedPoint,
+ )
+ )
+ hm.add_mpint(int(K))
+ H = self.hash_algo(hm.asbytes()).digest()
+ self.transport._set_K_H(K, H)
+ sig = self.transport.get_server_key().sign_ssh_data(
+ H, self.transport.host_key_type
+ )
+ # construct reply
+ m = Message()
+ m.add_byte(c_MSG_KEXECDH_REPLY)
+ m.add_string(K_S)
+ m.add_string(
+ self.Q_S.public_bytes(
+ serialization.Encoding.X962,
+ serialization.PublicFormat.UncompressedPoint,
+ )
+ )
+ m.add_string(sig)
+ self.transport._send_message(m)
+ self.transport._activate_outbound()
+
+ def _parse_kexecdh_reply(self, m):
+ K_S = m.get_string()
+ Q_S_bytes = m.get_string()
+ self.Q_S = ec.EllipticCurvePublicKey.from_encoded_point(
+ self.curve, Q_S_bytes
+ )
+ sig = m.get_binary()
+ K = self.P.exchange(ec.ECDH(), self.Q_S)
+ K = int(hexlify(K), 16)
+ # compute exchange hash and verify signature
+ hm = Message()
+ hm.add(
+ self.transport.local_version,
+ self.transport.remote_version,
+ self.transport.local_kex_init,
+ self.transport.remote_kex_init,
+ )
+ hm.add_string(K_S)
+ # SEC1: V2.0 2.3.3 Elliptic-Curve-Point-to-Octet-String Conversion
+ hm.add_string(
+ self.Q_C.public_bytes(
+ serialization.Encoding.X962,
+ serialization.PublicFormat.UncompressedPoint,
+ )
+ )
+ hm.add_string(Q_S_bytes)
+ hm.add_mpint(K)
+ self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
+ self.transport._verify_key(K_S, sig)
+ self.transport._activate_outbound()
+
+
+class KexNistp384(KexNistp256):
+ name = "ecdh-sha2-nistp384"
+ hash_algo = sha384
+ curve = ec.SECP384R1()
+
+
+class KexNistp521(KexNistp256):
+ name = "ecdh-sha2-nistp521"
+ hash_algo = sha512
+ curve = ec.SECP521R1()
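
The NIST kex classes send their public value as a SEC1 uncompressed point and turn the ECDH shared secret into the integer K via hexlify. A self-contained sketch of just that computation for nistp256, with both parties simulated locally:

    from binascii import hexlify

    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import ec

    client = ec.generate_private_key(ec.SECP256R1())
    server = ec.generate_private_key(ec.SECP256R1())

    # SEC1 2.3.3 uncompressed point (0x04 || X || Y): the Q_C/Q_S wire format.
    q_c_bytes = client.public_key().public_bytes(
        serialization.Encoding.X962,
        serialization.PublicFormat.UncompressedPoint,
    )

    # Server side: rebuild the client's point and compute the shared secret.
    q_c = ec.EllipticCurvePublicKey.from_encoded_point(ec.SECP256R1(), q_c_bytes)
    secret = server.exchange(ec.ECDH(), q_c)
    K = int(hexlify(secret), 16)   # the mpint value fed into the exchange hash

    # Client side derives the same K from the server's public key.
    assert K == int(hexlify(client.exchange(ec.ECDH(), server.public_key())), 16)
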
diff --git a/paramiko/kex_gex.py b/paramiko/kex_gex.py
new file mode 100644
index 0000000..baa0803
--- /dev/null
+++ b/paramiko/kex_gex.py
@@ -0,0 +1,288 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Variant on `KexGroup1 <paramiko.kex_group1.KexGroup1>` where the prime "p" and
+generator "g" are provided by the server. A bit more work is required on the
+client side, and a **lot** more on the server side.
+"""
+
+import os
+from hashlib import sha1, sha256
+
+from paramiko import util
+from paramiko.common import DEBUG, byte_chr, byte_ord, byte_mask
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+
+
+(
+ _MSG_KEXDH_GEX_REQUEST_OLD,
+ _MSG_KEXDH_GEX_GROUP,
+ _MSG_KEXDH_GEX_INIT,
+ _MSG_KEXDH_GEX_REPLY,
+ _MSG_KEXDH_GEX_REQUEST,
+) = range(30, 35)
+
+(
+ c_MSG_KEXDH_GEX_REQUEST_OLD,
+ c_MSG_KEXDH_GEX_GROUP,
+ c_MSG_KEXDH_GEX_INIT,
+ c_MSG_KEXDH_GEX_REPLY,
+ c_MSG_KEXDH_GEX_REQUEST,
+) = [byte_chr(c) for c in range(30, 35)]
+
+
+class KexGex:
+
+ name = "diffie-hellman-group-exchange-sha1"
+ min_bits = 1024
+ max_bits = 8192
+ preferred_bits = 2048
+ hash_algo = sha1
+
+ def __init__(self, transport):
+ self.transport = transport
+ self.p = None
+ self.q = None
+ self.g = None
+ self.x = None
+ self.e = None
+ self.f = None
+ self.old_style = False
+
+ def start_kex(self, _test_old_style=False):
+ if self.transport.server_mode:
+ self.transport._expect_packet(
+ _MSG_KEXDH_GEX_REQUEST, _MSG_KEXDH_GEX_REQUEST_OLD
+ )
+ return
+ # request a bit range: we accept (min_bits) to (max_bits), but prefer
+ # (preferred_bits). according to the spec, we shouldn't pull the
+ # minimum up above 1024.
+ m = Message()
+ if _test_old_style:
+ # only used for unit tests: we shouldn't ever send this
+ m.add_byte(c_MSG_KEXDH_GEX_REQUEST_OLD)
+ m.add_int(self.preferred_bits)
+ self.old_style = True
+ else:
+ m.add_byte(c_MSG_KEXDH_GEX_REQUEST)
+ m.add_int(self.min_bits)
+ m.add_int(self.preferred_bits)
+ m.add_int(self.max_bits)
+ self.transport._send_message(m)
+ self.transport._expect_packet(_MSG_KEXDH_GEX_GROUP)
+
+ def parse_next(self, ptype, m):
+ if ptype == _MSG_KEXDH_GEX_REQUEST:
+ return self._parse_kexdh_gex_request(m)
+ elif ptype == _MSG_KEXDH_GEX_GROUP:
+ return self._parse_kexdh_gex_group(m)
+ elif ptype == _MSG_KEXDH_GEX_INIT:
+ return self._parse_kexdh_gex_init(m)
+ elif ptype == _MSG_KEXDH_GEX_REPLY:
+ return self._parse_kexdh_gex_reply(m)
+ elif ptype == _MSG_KEXDH_GEX_REQUEST_OLD:
+ return self._parse_kexdh_gex_request_old(m)
+ msg = "KexGex {} asked to handle packet type {:d}"
+ raise SSHException(msg.format(self.name, ptype))
+
+ # ...internals...
+
+ def _generate_x(self):
+ # generate an "x" (1 < x < (p-1)/2).
+ q = (self.p - 1) // 2
+ qnorm = util.deflate_long(q, 0)
+ qhbyte = byte_ord(qnorm[0])
+ byte_count = len(qnorm)
+ qmask = 0xFF
+ while not (qhbyte & 0x80):
+ qhbyte <<= 1
+ qmask >>= 1
+ while True:
+ x_bytes = os.urandom(byte_count)
+ x_bytes = byte_mask(x_bytes[0], qmask) + x_bytes[1:]
+ x = util.inflate_long(x_bytes, 1)
+ if (x > 1) and (x < q):
+ break
+ self.x = x
+
+ def _parse_kexdh_gex_request(self, m):
+ minbits = m.get_int()
+ preferredbits = m.get_int()
+ maxbits = m.get_int()
+ # smoosh the user's preferred size into our own limits
+ if preferredbits > self.max_bits:
+ preferredbits = self.max_bits
+ if preferredbits < self.min_bits:
+ preferredbits = self.min_bits
+ # fix min/max if they're inconsistent. technically, we could just pout
+ # and hang up, but there's no harm in giving them the benefit of the
+ # doubt and just picking a bitsize for them.
+ if minbits > preferredbits:
+ minbits = preferredbits
+ if maxbits < preferredbits:
+ maxbits = preferredbits
+ # now save a copy
+ self.min_bits = minbits
+ self.preferred_bits = preferredbits
+ self.max_bits = maxbits
+ # generate prime
+ pack = self.transport._get_modulus_pack()
+ if pack is None:
+ raise SSHException("Can't do server-side gex with no modulus pack")
+ self.transport._log(
+ DEBUG,
+ "Picking p ({} <= {} <= {} bits)".format(
+ minbits, preferredbits, maxbits
+ ),
+ )
+ self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits)
+ m = Message()
+ m.add_byte(c_MSG_KEXDH_GEX_GROUP)
+ m.add_mpint(self.p)
+ m.add_mpint(self.g)
+ self.transport._send_message(m)
+ self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)
+
+ def _parse_kexdh_gex_request_old(self, m):
+ # same as above, but without min_bits or max_bits (used by older
+        # clients like PuTTY)
+ self.preferred_bits = m.get_int()
+ # smoosh the user's preferred size into our own limits
+ if self.preferred_bits > self.max_bits:
+ self.preferred_bits = self.max_bits
+ if self.preferred_bits < self.min_bits:
+ self.preferred_bits = self.min_bits
+ # generate prime
+ pack = self.transport._get_modulus_pack()
+ if pack is None:
+ raise SSHException("Can't do server-side gex with no modulus pack")
+ self.transport._log(
+ DEBUG, "Picking p (~ {} bits)".format(self.preferred_bits)
+ )
+ self.g, self.p = pack.get_modulus(
+ self.min_bits, self.preferred_bits, self.max_bits
+ )
+ m = Message()
+ m.add_byte(c_MSG_KEXDH_GEX_GROUP)
+ m.add_mpint(self.p)
+ m.add_mpint(self.g)
+ self.transport._send_message(m)
+ self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)
+ self.old_style = True
+
+ def _parse_kexdh_gex_group(self, m):
+ self.p = m.get_mpint()
+ self.g = m.get_mpint()
+ # reject if p's bit length < 1024 or > 8192
+ bitlen = util.bit_length(self.p)
+ if (bitlen < 1024) or (bitlen > 8192):
+ raise SSHException(
+ "Server-generated gex p (don't ask) is out of range "
+ "({} bits)".format(bitlen)
+ )
+ self.transport._log(DEBUG, "Got server p ({} bits)".format(bitlen))
+ self._generate_x()
+ # now compute e = g^x mod p
+ self.e = pow(self.g, self.x, self.p)
+ m = Message()
+ m.add_byte(c_MSG_KEXDH_GEX_INIT)
+ m.add_mpint(self.e)
+ self.transport._send_message(m)
+ self.transport._expect_packet(_MSG_KEXDH_GEX_REPLY)
+
+ def _parse_kexdh_gex_init(self, m):
+ self.e = m.get_mpint()
+ if (self.e < 1) or (self.e > self.p - 1):
+ raise SSHException('Client kex "e" is out of range')
+ self._generate_x()
+ self.f = pow(self.g, self.x, self.p)
+ K = pow(self.e, self.x, self.p)
+ key = self.transport.get_server_key().asbytes()
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
+ hm = Message()
+ hm.add(
+ self.transport.remote_version,
+ self.transport.local_version,
+ self.transport.remote_kex_init,
+ self.transport.local_kex_init,
+ key,
+ )
+ if not self.old_style:
+ hm.add_int(self.min_bits)
+ hm.add_int(self.preferred_bits)
+ if not self.old_style:
+ hm.add_int(self.max_bits)
+ hm.add_mpint(self.p)
+ hm.add_mpint(self.g)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ H = self.hash_algo(hm.asbytes()).digest()
+ self.transport._set_K_H(K, H)
+ # sign it
+ sig = self.transport.get_server_key().sign_ssh_data(
+ H, self.transport.host_key_type
+ )
+ # send reply
+ m = Message()
+ m.add_byte(c_MSG_KEXDH_GEX_REPLY)
+ m.add_string(key)
+ m.add_mpint(self.f)
+ m.add_string(sig)
+ self.transport._send_message(m)
+ self.transport._activate_outbound()
+
+ def _parse_kexdh_gex_reply(self, m):
+ host_key = m.get_string()
+ self.f = m.get_mpint()
+ sig = m.get_string()
+ if (self.f < 1) or (self.f > self.p - 1):
+ raise SSHException('Server kex "f" is out of range')
+ K = pow(self.f, self.x, self.p)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
+ hm = Message()
+ hm.add(
+ self.transport.local_version,
+ self.transport.remote_version,
+ self.transport.local_kex_init,
+ self.transport.remote_kex_init,
+ host_key,
+ )
+ if not self.old_style:
+ hm.add_int(self.min_bits)
+ hm.add_int(self.preferred_bits)
+ if not self.old_style:
+ hm.add_int(self.max_bits)
+ hm.add_mpint(self.p)
+ hm.add_mpint(self.g)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
+ self.transport._verify_key(host_key, sig)
+ self.transport._activate_outbound()
+
+
+class KexGexSHA256(KexGex):
+ name = "diffie-hellman-group-exchange-sha256"
+ hash_algo = sha256
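+
+
+# Editor's note: illustrative sketch, not upstream paramiko code. It restates
+# the `_generate_x` logic above using only the stdlib: draw random bytes the
+# same length as q = (p - 1) // 2, mask the top byte so the candidate cannot
+# exceed q's bit length, and retry until 1 < x < q.
+if __name__ == "__main__":
+    import os
+
+    def generate_x(p):
+        q = (p - 1) // 2
+        byte_count = (q.bit_length() + 7) // 8
+        # Mask clearing any bits above q's most significant bit.
+        top_bits = q.bit_length() - 8 * (byte_count - 1)
+        high_mask = (1 << top_bits) - 1
+        while True:
+            raw = bytearray(os.urandom(byte_count))
+            raw[0] &= high_mask
+            x = int.from_bytes(bytes(raw), "big")
+            if 1 < x < q:
+                return x
+
+    # Toy 64-bit modulus, purely to exercise the function (not a real group).
+    print(generate_x(0xFFFFFFFFFFFFFFC5))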
diff --git a/paramiko/kex_group1.py b/paramiko/kex_group1.py
new file mode 100644
index 0000000..f074256
--- /dev/null
+++ b/paramiko/kex_group1.py
@@ -0,0 +1,155 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
+1024 bit key halves, using a known "p" prime and "g" generator.
+"""
+
+import os
+from hashlib import sha1
+
+from paramiko import util
+from paramiko.common import max_byte, zero_byte, byte_chr, byte_mask
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+
+
+_MSG_KEXDH_INIT, _MSG_KEXDH_REPLY = range(30, 32)
+c_MSG_KEXDH_INIT, c_MSG_KEXDH_REPLY = [byte_chr(c) for c in range(30, 32)]
+
+b7fffffffffffffff = byte_chr(0x7F) + max_byte * 7
+b0000000000000000 = zero_byte * 8
+
+
+class KexGroup1:
+
+ # draft-ietf-secsh-transport-09.txt, page 17
+ P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF # noqa
+ G = 2
+
+ name = "diffie-hellman-group1-sha1"
+ hash_algo = sha1
+
+ def __init__(self, transport):
+ self.transport = transport
+ self.x = 0
+ self.e = 0
+ self.f = 0
+
+ def start_kex(self):
+ self._generate_x()
+ if self.transport.server_mode:
+ # compute f = g^x mod p, but don't send it yet
+ self.f = pow(self.G, self.x, self.P)
+ self.transport._expect_packet(_MSG_KEXDH_INIT)
+ return
+ # compute e = g^x mod p (where g=2), and send it
+ self.e = pow(self.G, self.x, self.P)
+ m = Message()
+ m.add_byte(c_MSG_KEXDH_INIT)
+ m.add_mpint(self.e)
+ self.transport._send_message(m)
+ self.transport._expect_packet(_MSG_KEXDH_REPLY)
+
+ def parse_next(self, ptype, m):
+ if self.transport.server_mode and (ptype == _MSG_KEXDH_INIT):
+ return self._parse_kexdh_init(m)
+ elif not self.transport.server_mode and (ptype == _MSG_KEXDH_REPLY):
+ return self._parse_kexdh_reply(m)
+ msg = "KexGroup1 asked to handle packet type {:d}"
+ raise SSHException(msg.format(ptype))
+
+ # ...internals...
+
+ def _generate_x(self):
+ # generate an "x" (1 < x < q), where q is (p-1)/2.
+ # p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
+        # therefore q can be approximated as 2^1023. we drop the subset of
+        # potential x where the first 63 bits are 1, because some of those
+        # will be larger than q (but this is a tiny tiny subset of
+        # potential x).
+        while True:
+ x_bytes = os.urandom(128)
+ x_bytes = byte_mask(x_bytes[0], 0x7F) + x_bytes[1:]
+ if (
+ x_bytes[:8] != b7fffffffffffffff
+ and x_bytes[:8] != b0000000000000000
+ ):
+ break
+ self.x = util.inflate_long(x_bytes)
+
+ def _parse_kexdh_reply(self, m):
+ # client mode
+ host_key = m.get_string()
+ self.f = m.get_mpint()
+ if (self.f < 1) or (self.f > self.P - 1):
+ raise SSHException('Server kex "f" is out of range')
+ sig = m.get_binary()
+ K = pow(self.f, self.x, self.P)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || e || f || K)
+ hm = Message()
+ hm.add(
+ self.transport.local_version,
+ self.transport.remote_version,
+ self.transport.local_kex_init,
+ self.transport.remote_kex_init,
+ )
+ hm.add_string(host_key)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
+ self.transport._verify_key(host_key, sig)
+ self.transport._activate_outbound()
+
+ def _parse_kexdh_init(self, m):
+ # server mode
+ self.e = m.get_mpint()
+ if (self.e < 1) or (self.e > self.P - 1):
+ raise SSHException('Client kex "e" is out of range')
+ K = pow(self.e, self.x, self.P)
+ key = self.transport.get_server_key().asbytes()
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || e || f || K)
+ hm = Message()
+ hm.add(
+ self.transport.remote_version,
+ self.transport.local_version,
+ self.transport.remote_kex_init,
+ self.transport.local_kex_init,
+ )
+ hm.add_string(key)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ H = self.hash_algo(hm.asbytes()).digest()
+ self.transport._set_K_H(K, H)
+ # sign it
+ sig = self.transport.get_server_key().sign_ssh_data(
+ H, self.transport.host_key_type
+ )
+ # send reply
+ m = Message()
+ m.add_byte(c_MSG_KEXDH_REPLY)
+ m.add_string(key)
+ m.add_mpint(self.f)
+ m.add_string(sig)
+ self.transport._send_message(m)
+ self.transport._activate_outbound()
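+
+
+# Editor's note: illustrative sketch, not upstream paramiko code. It shows
+# the bare Diffie-Hellman arithmetic performed by the class above: each side
+# picks a secret exponent, the client sends e = g^x mod p (KEXDH_INIT), the
+# server sends f = g^y mod p (KEXDH_REPLY), and both then compute the same K
+# without it ever crossing the wire.
+if __name__ == "__main__":
+    p, g = KexGroup1.P, KexGroup1.G
+    x = int.from_bytes(os.urandom(128), "big") % (p - 2) + 2  # client secret
+    y = int.from_bytes(os.urandom(128), "big") % (p - 2) + 2  # server secret
+    e = pow(g, x, p)
+    f = pow(g, y, p)
+    assert pow(f, x, p) == pow(e, y, p)  # the shared secret K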
diff --git a/paramiko/kex_group14.py b/paramiko/kex_group14.py
new file mode 100644
index 0000000..8dee551
--- /dev/null
+++ b/paramiko/kex_group14.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2013 Torsten Landschoff <torsten@debian.org>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
+2048 bit key halves, using a known "p" prime and "g" generator.
+"""
+
+from paramiko.kex_group1 import KexGroup1
+from hashlib import sha1, sha256
+
+
+class KexGroup14(KexGroup1):
+
+ # http://tools.ietf.org/html/rfc3526#section-3
+ P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa
+ G = 2
+
+ name = "diffie-hellman-group14-sha1"
+ hash_algo = sha1
+
+
+class KexGroup14SHA256(KexGroup14):
+ name = "diffie-hellman-group14-sha256"
+ hash_algo = sha256
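+
+
+# Editor's note: illustrative sketch, not upstream paramiko code. A quick
+# sanity check that group14 uses the 2048-bit MODP prime from RFC 3526 and
+# that the SHA-256 variant differs only in the exchange-hash algorithm.
+if __name__ == "__main__":
+    assert KexGroup14.P.bit_length() == 2048
+    assert KexGroup14SHA256.P == KexGroup14.P
+    assert KexGroup14SHA256.hash_algo is sha256
+    print(KexGroup14.name, "->", KexGroup14SHA256.name)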
diff --git a/paramiko/kex_group16.py b/paramiko/kex_group16.py
new file mode 100644
index 0000000..c675f87
--- /dev/null
+++ b/paramiko/kex_group16.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2019 Edgar Sousa <https://github.com/edgsousa>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
+4096 bit key halves, using a known "p" prime and "g" generator.
+"""
+
+from paramiko.kex_group1 import KexGroup1
+from hashlib import sha512
+
+
+class KexGroup16SHA512(KexGroup1):
+ name = "diffie-hellman-group16-sha512"
+ # http://tools.ietf.org/html/rfc3526#section-5
+ P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF # noqa
+ G = 2
+
+ hash_algo = sha512
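+
+
+# Editor's note: illustrative sketch, not upstream paramiko code. Group 16 is
+# the 4096-bit MODP group from RFC 3526; relative to group1/group14, only the
+# modulus size and the SHA-512 exchange hash change.
+if __name__ == "__main__":
+    assert KexGroup16SHA512.P.bit_length() == 4096
+    assert KexGroup16SHA512.hash_algo is sha512
+    print(KexGroup16SHA512.name, sha512().digest_size * 8, "bit digest")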
diff --git a/paramiko/kex_gss.py b/paramiko/kex_gss.py
new file mode 100644
index 0000000..2a5f29e
--- /dev/null
+++ b/paramiko/kex_gss.py
@@ -0,0 +1,686 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+# Copyright (C) 2013-2014 science + computing ag
+# Author: Sebastian Deiss <sebastian.deiss@t-online.de>
+#
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+"""
+This module provides GSS-API / SSPI Key Exchange as defined in :rfc:`4462`.
+
+.. note:: Credential delegation is not supported in server mode.
+
+.. note::
+ `RFC 4462 Section 2.2
+ <https://tools.ietf.org/html/rfc4462.html#section-2.2>`_ says we are not
+ required to implement GSS-API error messages. Thus, in many methods within
+ this module, if an error occurs an exception will be thrown and the
+ connection will be terminated.
+
+.. seealso:: :doc:`/api/ssh_gss`
+
+.. versionadded:: 1.15
+"""
+
+import os
+from hashlib import sha1
+
+from paramiko.common import (
+ DEBUG,
+ max_byte,
+ zero_byte,
+ byte_chr,
+ byte_mask,
+ byte_ord,
+)
+from paramiko import util
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+
+
+(
+ MSG_KEXGSS_INIT,
+ MSG_KEXGSS_CONTINUE,
+ MSG_KEXGSS_COMPLETE,
+ MSG_KEXGSS_HOSTKEY,
+ MSG_KEXGSS_ERROR,
+) = range(30, 35)
+(MSG_KEXGSS_GROUPREQ, MSG_KEXGSS_GROUP) = range(40, 42)
+(
+ c_MSG_KEXGSS_INIT,
+ c_MSG_KEXGSS_CONTINUE,
+ c_MSG_KEXGSS_COMPLETE,
+ c_MSG_KEXGSS_HOSTKEY,
+ c_MSG_KEXGSS_ERROR,
+) = [byte_chr(c) for c in range(30, 35)]
+(c_MSG_KEXGSS_GROUPREQ, c_MSG_KEXGSS_GROUP) = [
+ byte_chr(c) for c in range(40, 42)
+]
+
+
+class KexGSSGroup1:
+ """
+ GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange as defined in `RFC
+ 4462 Section 2 <https://tools.ietf.org/html/rfc4462.html#section-2>`_
+ """
+
+ # draft-ietf-secsh-transport-09.txt, page 17
+ P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF # noqa
+ G = 2
+ b7fffffffffffffff = byte_chr(0x7F) + max_byte * 7 # noqa
+ b0000000000000000 = zero_byte * 8 # noqa
+ NAME = "gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g=="
+
+ def __init__(self, transport):
+ self.transport = transport
+ self.kexgss = self.transport.kexgss_ctxt
+ self.gss_host = None
+ self.x = 0
+ self.e = 0
+ self.f = 0
+
+ def start_kex(self):
+ """
+ Start the GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange.
+ """
+ self._generate_x()
+ if self.transport.server_mode:
+ # compute f = g^x mod p, but don't send it yet
+ self.f = pow(self.G, self.x, self.P)
+ self.transport._expect_packet(MSG_KEXGSS_INIT)
+ return
+ # compute e = g^x mod p (where g=2), and send it
+ self.e = pow(self.G, self.x, self.P)
+ # Initialize GSS-API Key Exchange
+ self.gss_host = self.transport.gss_host
+ m = Message()
+ m.add_byte(c_MSG_KEXGSS_INIT)
+ m.add_string(self.kexgss.ssh_init_sec_context(target=self.gss_host))
+ m.add_mpint(self.e)
+ self.transport._send_message(m)
+ self.transport._expect_packet(
+ MSG_KEXGSS_HOSTKEY,
+ MSG_KEXGSS_CONTINUE,
+ MSG_KEXGSS_COMPLETE,
+ MSG_KEXGSS_ERROR,
+ )
+
+ def parse_next(self, ptype, m):
+ """
+ Parse the next packet.
+
+        :param ptype: The (int) type of the incoming packet
+ :param `.Message` m: The packet content
+ """
+ if self.transport.server_mode and (ptype == MSG_KEXGSS_INIT):
+ return self._parse_kexgss_init(m)
+ elif not self.transport.server_mode and (ptype == MSG_KEXGSS_HOSTKEY):
+ return self._parse_kexgss_hostkey(m)
+ elif self.transport.server_mode and (ptype == MSG_KEXGSS_CONTINUE):
+ return self._parse_kexgss_continue(m)
+ elif not self.transport.server_mode and (ptype == MSG_KEXGSS_COMPLETE):
+ return self._parse_kexgss_complete(m)
+ elif ptype == MSG_KEXGSS_ERROR:
+ return self._parse_kexgss_error(m)
+ msg = "GSS KexGroup1 asked to handle packet type {:d}"
+ raise SSHException(msg.format(ptype))
+
+ # ## internals...
+
+ def _generate_x(self):
+ """
+ generate an "x" (1 < x < q), where q is (p-1)/2.
+ p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
+        therefore q can be approximated as 2^1023. we drop the subset of
+        potential x where the first 63 bits are 1, because some of those will
+        be larger than q (but this is a tiny tiny subset of potential x).
+        """
+        while True:
+ x_bytes = os.urandom(128)
+ x_bytes = byte_mask(x_bytes[0], 0x7F) + x_bytes[1:]
+ first = x_bytes[:8]
+ if first not in (self.b7fffffffffffffff, self.b0000000000000000):
+ break
+ self.x = util.inflate_long(x_bytes)
+
+ def _parse_kexgss_hostkey(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_HOSTKEY message (client mode).
+
+ :param `.Message` m: The content of the SSH2_MSG_KEXGSS_HOSTKEY message
+ """
+ # client mode
+ host_key = m.get_string()
+ self.transport.host_key = host_key
+ sig = m.get_string()
+ self.transport._verify_key(host_key, sig)
+ self.transport._expect_packet(MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE)
+
+ def _parse_kexgss_continue(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_CONTINUE message.
+
+ :param `.Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE
+ message
+ """
+ if not self.transport.server_mode:
+ srv_token = m.get_string()
+ m = Message()
+ m.add_byte(c_MSG_KEXGSS_CONTINUE)
+ m.add_string(
+ self.kexgss.ssh_init_sec_context(
+ target=self.gss_host, recv_token=srv_token
+ )
+ )
+            self.transport._send_message(m)
+ self.transport._expect_packet(
+ MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
+ )
+ else:
+ pass
+
+ def _parse_kexgss_complete(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_COMPLETE message (client mode).
+
+ :param `.Message` m: The content of the
+ SSH2_MSG_KEXGSS_COMPLETE message
+ """
+ # client mode
+ if self.transport.host_key is None:
+ self.transport.host_key = NullHostKey()
+ self.f = m.get_mpint()
+ if (self.f < 1) or (self.f > self.P - 1):
+ raise SSHException('Server kex "f" is out of range')
+ mic_token = m.get_string()
+        # This is TRUE if the message carries a GSS-API token.
+        has_token = m.get_boolean()
+        srv_token = None
+        if has_token:
+ srv_token = m.get_string()
+ K = pow(self.f, self.x, self.P)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || e || f || K)
+ hm = Message()
+ hm.add(
+ self.transport.local_version,
+ self.transport.remote_version,
+ self.transport.local_kex_init,
+ self.transport.remote_kex_init,
+ )
+ hm.add_string(self.transport.host_key.__str__())
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+        H = sha1(hm.asbytes()).digest()
+ self.transport._set_K_H(K, H)
+ if srv_token is not None:
+ self.kexgss.ssh_init_sec_context(
+ target=self.gss_host, recv_token=srv_token
+ )
+ self.kexgss.ssh_check_mic(mic_token, H)
+ else:
+ self.kexgss.ssh_check_mic(mic_token, H)
+ self.transport.gss_kex_used = True
+ self.transport._activate_outbound()
+
+ def _parse_kexgss_init(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_INIT message (server mode).
+
+ :param `.Message` m: The content of the SSH2_MSG_KEXGSS_INIT message
+ """
+ # server mode
+ client_token = m.get_string()
+ self.e = m.get_mpint()
+ if (self.e < 1) or (self.e > self.P - 1):
+ raise SSHException('Client kex "e" is out of range')
+ K = pow(self.e, self.x, self.P)
+ self.transport.host_key = NullHostKey()
+ key = self.transport.host_key.__str__()
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || e || f || K)
+ hm = Message()
+ hm.add(
+ self.transport.remote_version,
+ self.transport.local_version,
+ self.transport.remote_kex_init,
+ self.transport.local_kex_init,
+ )
+ hm.add_string(key)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ H = sha1(hm.asbytes()).digest()
+ self.transport._set_K_H(K, H)
+ srv_token = self.kexgss.ssh_accept_sec_context(
+ self.gss_host, client_token
+ )
+ m = Message()
+ if self.kexgss._gss_srv_ctxt_status:
+ mic_token = self.kexgss.ssh_get_mic(
+ self.transport.session_id, gss_kex=True
+ )
+ m.add_byte(c_MSG_KEXGSS_COMPLETE)
+ m.add_mpint(self.f)
+ m.add_string(mic_token)
+ if srv_token is not None:
+ m.add_boolean(True)
+ m.add_string(srv_token)
+ else:
+ m.add_boolean(False)
+ self.transport._send_message(m)
+ self.transport.gss_kex_used = True
+ self.transport._activate_outbound()
+ else:
+ m.add_byte(c_MSG_KEXGSS_CONTINUE)
+ m.add_string(srv_token)
+ self.transport._send_message(m)
+ self.transport._expect_packet(
+ MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
+ )
+
+ def _parse_kexgss_error(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_ERROR message (client mode).
+        The server may send a GSS-API error message; if it does, we surface
+        the error by raising an exception (client mode).
+
+ :param `.Message` m: The content of the SSH2_MSG_KEXGSS_ERROR message
+ :raise SSHException: Contains GSS-API major and minor status as well as
+ the error message and the language tag of the
+ message
+ """
+ maj_status = m.get_int()
+ min_status = m.get_int()
+ err_msg = m.get_string()
+ m.get_string() # we don't care about the language!
+ raise SSHException(
+ """GSS-API Error:
+Major Status: {}
+Minor Status: {}
+Error Message: {}
+""".format(
+ maj_status, min_status, err_msg
+ )
+ )
+
+
+class KexGSSGroup14(KexGSSGroup1):
+ """
+ GSS-API / SSPI Authenticated Diffie-Hellman Group14 Key Exchange as defined
+ in `RFC 4462 Section 2
+ <https://tools.ietf.org/html/rfc4462.html#section-2>`_
+ """
+
+ P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa
+ G = 2
+ NAME = "gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g=="
+
+
+class KexGSSGex:
+ """
+ GSS-API / SSPI Authenticated Diffie-Hellman Group Exchange as defined in
+ `RFC 4462 Section 2 <https://tools.ietf.org/html/rfc4462.html#section-2>`_
+ """
+
+ NAME = "gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g=="
+ min_bits = 1024
+ max_bits = 8192
+ preferred_bits = 2048
+
+ def __init__(self, transport):
+ self.transport = transport
+ self.kexgss = self.transport.kexgss_ctxt
+ self.gss_host = None
+ self.p = None
+ self.q = None
+ self.g = None
+ self.x = None
+ self.e = None
+ self.f = None
+ self.old_style = False
+
+ def start_kex(self):
+ """
+ Start the GSS-API / SSPI Authenticated Diffie-Hellman Group Exchange
+ """
+ if self.transport.server_mode:
+ self.transport._expect_packet(MSG_KEXGSS_GROUPREQ)
+ return
+ # request a bit range: we accept (min_bits) to (max_bits), but prefer
+ # (preferred_bits). according to the spec, we shouldn't pull the
+ # minimum up above 1024.
+ self.gss_host = self.transport.gss_host
+ m = Message()
+ m.add_byte(c_MSG_KEXGSS_GROUPREQ)
+ m.add_int(self.min_bits)
+ m.add_int(self.preferred_bits)
+ m.add_int(self.max_bits)
+ self.transport._send_message(m)
+ self.transport._expect_packet(MSG_KEXGSS_GROUP)
+
+ def parse_next(self, ptype, m):
+ """
+ Parse the next packet.
+
+        :param ptype: The (int) type of the incoming packet
+ :param `.Message` m: The packet content
+ """
+ if ptype == MSG_KEXGSS_GROUPREQ:
+ return self._parse_kexgss_groupreq(m)
+ elif ptype == MSG_KEXGSS_GROUP:
+ return self._parse_kexgss_group(m)
+ elif ptype == MSG_KEXGSS_INIT:
+ return self._parse_kexgss_gex_init(m)
+ elif ptype == MSG_KEXGSS_HOSTKEY:
+ return self._parse_kexgss_hostkey(m)
+ elif ptype == MSG_KEXGSS_CONTINUE:
+ return self._parse_kexgss_continue(m)
+ elif ptype == MSG_KEXGSS_COMPLETE:
+ return self._parse_kexgss_complete(m)
+ elif ptype == MSG_KEXGSS_ERROR:
+ return self._parse_kexgss_error(m)
+        msg = "KexGSSGex asked to handle packet type {:d}"
+ raise SSHException(msg.format(ptype))
+
+ # ## internals...
+
+ def _generate_x(self):
+ # generate an "x" (1 < x < (p-1)/2).
+ q = (self.p - 1) // 2
+ qnorm = util.deflate_long(q, 0)
+ qhbyte = byte_ord(qnorm[0])
+ byte_count = len(qnorm)
+ qmask = 0xFF
+ while not (qhbyte & 0x80):
+ qhbyte <<= 1
+ qmask >>= 1
+ while True:
+ x_bytes = os.urandom(byte_count)
+ x_bytes = byte_mask(x_bytes[0], qmask) + x_bytes[1:]
+ x = util.inflate_long(x_bytes, 1)
+ if (x > 1) and (x < q):
+ break
+ self.x = x
+
+ def _parse_kexgss_groupreq(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_GROUPREQ message (server mode).
+
+ :param `.Message` m: The content of the
+ SSH2_MSG_KEXGSS_GROUPREQ message
+ """
+ minbits = m.get_int()
+ preferredbits = m.get_int()
+ maxbits = m.get_int()
+ # smoosh the user's preferred size into our own limits
+ if preferredbits > self.max_bits:
+ preferredbits = self.max_bits
+ if preferredbits < self.min_bits:
+ preferredbits = self.min_bits
+ # fix min/max if they're inconsistent. technically, we could just pout
+ # and hang up, but there's no harm in giving them the benefit of the
+ # doubt and just picking a bitsize for them.
+ if minbits > preferredbits:
+ minbits = preferredbits
+ if maxbits < preferredbits:
+ maxbits = preferredbits
+ # now save a copy
+ self.min_bits = minbits
+ self.preferred_bits = preferredbits
+ self.max_bits = maxbits
+ # generate prime
+ pack = self.transport._get_modulus_pack()
+ if pack is None:
+ raise SSHException("Can't do server-side gex with no modulus pack")
+ self.transport._log(
+ DEBUG, # noqa
+ "Picking p ({} <= {} <= {} bits)".format(
+ minbits, preferredbits, maxbits
+ ),
+ )
+ self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits)
+ m = Message()
+ m.add_byte(c_MSG_KEXGSS_GROUP)
+ m.add_mpint(self.p)
+ m.add_mpint(self.g)
+ self.transport._send_message(m)
+ self.transport._expect_packet(MSG_KEXGSS_INIT)
+
+ def _parse_kexgss_group(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_GROUP message (client mode).
+
+ :param `Message` m: The content of the SSH2_MSG_KEXGSS_GROUP message
+ """
+ self.p = m.get_mpint()
+ self.g = m.get_mpint()
+ # reject if p's bit length < 1024 or > 8192
+ bitlen = util.bit_length(self.p)
+ if (bitlen < 1024) or (bitlen > 8192):
+ raise SSHException(
+ "Server-generated gex p (don't ask) is out of range "
+ "({} bits)".format(bitlen)
+ )
+ self.transport._log(
+ DEBUG, "Got server p ({} bits)".format(bitlen)
+ ) # noqa
+ self._generate_x()
+ # now compute e = g^x mod p
+ self.e = pow(self.g, self.x, self.p)
+ m = Message()
+ m.add_byte(c_MSG_KEXGSS_INIT)
+ m.add_string(self.kexgss.ssh_init_sec_context(target=self.gss_host))
+ m.add_mpint(self.e)
+ self.transport._send_message(m)
+ self.transport._expect_packet(
+ MSG_KEXGSS_HOSTKEY,
+ MSG_KEXGSS_CONTINUE,
+ MSG_KEXGSS_COMPLETE,
+ MSG_KEXGSS_ERROR,
+ )
+
+ def _parse_kexgss_gex_init(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_INIT message (server mode).
+
+ :param `Message` m: The content of the SSH2_MSG_KEXGSS_INIT message
+ """
+ client_token = m.get_string()
+ self.e = m.get_mpint()
+ if (self.e < 1) or (self.e > self.p - 1):
+ raise SSHException('Client kex "e" is out of range')
+ self._generate_x()
+ self.f = pow(self.g, self.x, self.p)
+ K = pow(self.e, self.x, self.p)
+ self.transport.host_key = NullHostKey()
+ key = self.transport.host_key.__str__()
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
+ hm = Message()
+ hm.add(
+ self.transport.remote_version,
+ self.transport.local_version,
+ self.transport.remote_kex_init,
+ self.transport.local_kex_init,
+ key,
+ )
+ hm.add_int(self.min_bits)
+ hm.add_int(self.preferred_bits)
+ hm.add_int(self.max_bits)
+ hm.add_mpint(self.p)
+ hm.add_mpint(self.g)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ H = sha1(hm.asbytes()).digest()
+ self.transport._set_K_H(K, H)
+ srv_token = self.kexgss.ssh_accept_sec_context(
+ self.gss_host, client_token
+ )
+ m = Message()
+ if self.kexgss._gss_srv_ctxt_status:
+ mic_token = self.kexgss.ssh_get_mic(
+ self.transport.session_id, gss_kex=True
+ )
+ m.add_byte(c_MSG_KEXGSS_COMPLETE)
+ m.add_mpint(self.f)
+ m.add_string(mic_token)
+ if srv_token is not None:
+ m.add_boolean(True)
+ m.add_string(srv_token)
+ else:
+ m.add_boolean(False)
+ self.transport._send_message(m)
+ self.transport.gss_kex_used = True
+ self.transport._activate_outbound()
+ else:
+ m.add_byte(c_MSG_KEXGSS_CONTINUE)
+ m.add_string(srv_token)
+ self.transport._send_message(m)
+ self.transport._expect_packet(
+ MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
+ )
+
+ def _parse_kexgss_hostkey(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_HOSTKEY message (client mode).
+
+ :param `Message` m: The content of the SSH2_MSG_KEXGSS_HOSTKEY message
+ """
+ # client mode
+ host_key = m.get_string()
+ self.transport.host_key = host_key
+ sig = m.get_string()
+ self.transport._verify_key(host_key, sig)
+ self.transport._expect_packet(MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE)
+
+ def _parse_kexgss_continue(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_CONTINUE message.
+
+ :param `Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE message
+ """
+ if not self.transport.server_mode:
+ srv_token = m.get_string()
+ m = Message()
+ m.add_byte(c_MSG_KEXGSS_CONTINUE)
+ m.add_string(
+ self.kexgss.ssh_init_sec_context(
+ target=self.gss_host, recv_token=srv_token
+ )
+ )
+            self.transport._send_message(m)
+ self.transport._expect_packet(
+ MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
+ )
+ else:
+ pass
+
+ def _parse_kexgss_complete(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_COMPLETE message (client mode).
+
+ :param `Message` m: The content of the SSH2_MSG_KEXGSS_COMPLETE message
+ """
+ if self.transport.host_key is None:
+ self.transport.host_key = NullHostKey()
+ self.f = m.get_mpint()
+ mic_token = m.get_string()
+        # This is TRUE if the message carries a GSS-API token.
+        has_token = m.get_boolean()
+        srv_token = None
+        if has_token:
+ srv_token = m.get_string()
+ if (self.f < 1) or (self.f > self.p - 1):
+ raise SSHException('Server kex "f" is out of range')
+ K = pow(self.f, self.x, self.p)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
+ hm = Message()
+ hm.add(
+ self.transport.local_version,
+ self.transport.remote_version,
+ self.transport.local_kex_init,
+ self.transport.remote_kex_init,
+ self.transport.host_key.__str__(),
+ )
+ if not self.old_style:
+ hm.add_int(self.min_bits)
+ hm.add_int(self.preferred_bits)
+ if not self.old_style:
+ hm.add_int(self.max_bits)
+ hm.add_mpint(self.p)
+ hm.add_mpint(self.g)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ H = sha1(hm.asbytes()).digest()
+ self.transport._set_K_H(K, H)
+ if srv_token is not None:
+ self.kexgss.ssh_init_sec_context(
+ target=self.gss_host, recv_token=srv_token
+ )
+ self.kexgss.ssh_check_mic(mic_token, H)
+ else:
+ self.kexgss.ssh_check_mic(mic_token, H)
+ self.transport.gss_kex_used = True
+ self.transport._activate_outbound()
+
+ def _parse_kexgss_error(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_ERROR message (client mode).
+        The server may send a GSS-API error message; if it does, we surface
+        the error by raising an exception (client mode).
+
+ :param `Message` m: The content of the SSH2_MSG_KEXGSS_ERROR message
+ :raise SSHException: Contains GSS-API major and minor status as well as
+ the error message and the language tag of the
+ message
+ """
+ maj_status = m.get_int()
+ min_status = m.get_int()
+ err_msg = m.get_string()
+ m.get_string() # we don't care about the language (lang_tag)!
+ raise SSHException(
+ """GSS-API Error:
+Major Status: {}
+Minor Status: {}
+Error Message: {}
+""".format(
+ maj_status, min_status, err_msg
+ )
+ )
+
+
+class NullHostKey:
+ """
+ This class represents the Null Host Key for GSS-API Key Exchange as defined
+ in `RFC 4462 Section 5
+ <https://tools.ietf.org/html/rfc4462.html#section-5>`_
+ """
+
+ def __init__(self):
+ self.key = ""
+
+ def __str__(self):
+ return self.key
+
+ def get_name(self):
+ return self.key
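+
+
+# Editor's note: illustrative sketch, not upstream paramiko code. Two small
+# checks: NullHostKey really is an "empty" host key (what gets hashed as K_S
+# when no host key is used, per RFC 4462 section 5), and the trailing
+# "toWM5Slw5Ew8Mqkay+al2g==" in the NAME constants above should be the
+# Base64 of the MD5 of the DER-encoded Kerberos V5 mechanism OID
+# (1.2.840.113554.1.2.2), as RFC 4462 specifies for method names.
+if __name__ == "__main__":
+    import base64
+    from hashlib import md5
+
+    assert str(NullHostKey()) == ""
+
+    krb5_mech_oid_der = bytes(
+        [0x06, 0x09, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x12, 0x01, 0x02, 0x02]
+    )
+    suffix = base64.b64encode(md5(krb5_mech_oid_der).digest()).decode()
+    print("computed suffix:", suffix)
+    print("NAME constant  :", KexGSSGroup1.NAME.rsplit("-", 1)[-1])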
diff --git a/paramiko/message.py b/paramiko/message.py
new file mode 100644
index 0000000..8c2b3bd
--- /dev/null
+++ b/paramiko/message.py
@@ -0,0 +1,318 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Implementation of an SSH2 "message".
+"""
+
+import struct
+from io import BytesIO
+
+from paramiko import util
+from paramiko.common import zero_byte, max_byte, one_byte
+from paramiko.util import u
+
+
+class Message:
+ """
+ An SSH2 message is a stream of bytes that encodes some combination of
+ strings, integers, bools, and infinite-precision integers. This class
+ builds or breaks down such a byte stream.
+
+ Normally you don't need to deal with anything this low-level, but it's
+ exposed for people implementing custom extensions, or features that
+ paramiko doesn't support yet.
+ """
+
+ big_int = 0xFF000000
+
+ def __init__(self, content=None):
+ """
+ Create a new SSH2 message.
+
+ :param bytes content:
+ the byte stream to use as the message content (passed in only when
+ decomposing a message).
+ """
+ if content is not None:
+ self.packet = BytesIO(content)
+ else:
+ self.packet = BytesIO()
+
+ def __bytes__(self):
+ return self.asbytes()
+
+ def __repr__(self):
+ """
+ Returns a string representation of this object, for debugging.
+ """
+ return "paramiko.Message(" + repr(self.packet.getvalue()) + ")"
+
+ # TODO 4.0: just merge into __bytes__ (everywhere)
+ def asbytes(self):
+ """
+ Return the byte stream content of this Message, as a `bytes`.
+ """
+ return self.packet.getvalue()
+
+ def rewind(self):
+ """
+ Rewind the message to the beginning as if no items had been parsed
+ out of it yet.
+ """
+ self.packet.seek(0)
+
+ def get_remainder(self):
+ """
+ Return the `bytes` of this message that haven't already been parsed and
+ returned.
+ """
+ position = self.packet.tell()
+ remainder = self.packet.read()
+ self.packet.seek(position)
+ return remainder
+
+ def get_so_far(self):
+ """
+ Returns the `bytes` of this message that have been parsed and
+ returned. The string passed into a message's constructor can be
+ regenerated by concatenating ``get_so_far`` and `get_remainder`.
+ """
+ position = self.packet.tell()
+ self.rewind()
+ return self.packet.read(position)
+
+ def get_bytes(self, n):
+ """
+        Return the next ``n`` bytes of the message, without decomposing into
+        an int, decoded string, etc. Just the raw bytes are returned. If
+        fewer than ``n`` bytes remain, the result is padded with zero bytes
+        up to length ``n`` (as long as ``n`` is under the 1 MB padding cap).
+ """
+ b = self.packet.read(n)
+ max_pad_size = 1 << 20 # Limit padding to 1 MB
+ if len(b) < n < max_pad_size:
+ return b + zero_byte * (n - len(b))
+ return b
+
+ def get_byte(self):
+ """
+ Return the next byte of the message, without decomposing it. This
+ is equivalent to `get_bytes(1) <get_bytes>`.
+
+ :return:
+ the next (`bytes`) byte of the message, or ``b'\000'`` if there
+ aren't any bytes remaining.
+ """
+ return self.get_bytes(1)
+
+ def get_boolean(self):
+ """
+ Fetch a boolean from the stream.
+ """
+ b = self.get_bytes(1)
+ return b != zero_byte
+
+ def get_adaptive_int(self):
+ """
+ Fetch an int from the stream.
+
+        :return: an unsigned `int` (32-bit, or arbitrary precision when the
+            leading flag byte is ``0xFF``).
+ """
+ byte = self.get_bytes(1)
+ if byte == max_byte:
+ return util.inflate_long(self.get_binary())
+ byte += self.get_bytes(3)
+ return struct.unpack(">I", byte)[0]
+
+ def get_int(self):
+ """
+ Fetch an int from the stream.
+ """
+ return struct.unpack(">I", self.get_bytes(4))[0]
+
+ def get_int64(self):
+ """
+ Fetch a 64-bit int from the stream.
+
+ :return: a 64-bit unsigned integer (`int`).
+ """
+ return struct.unpack(">Q", self.get_bytes(8))[0]
+
+ def get_mpint(self):
+ """
+ Fetch a long int (mpint) from the stream.
+
+ :return: an arbitrary-length integer (`int`).
+ """
+ return util.inflate_long(self.get_binary())
+
+ # TODO 4.0: depending on where this is used internally or downstream, force
+ # users to specify get_binary instead and delete this.
+ def get_string(self):
+ """
+ Fetch a "string" from the stream. This will actually be a `bytes`
+ object, and may contain unprintable characters. (It's not unheard of
+ for a string to contain another byte-stream message.)
+ """
+ return self.get_bytes(self.get_int())
+
+ # TODO 4.0: also consider having this take over the get_string name, and
+ # remove this name instead.
+ def get_text(self):
+ """
+ Fetch a Unicode string from the stream.
+
+        This currently operates by attempting to decode the next "string" as
+        ``utf-8``.
+ """
+ return u(self.get_string())
+
+ def get_binary(self):
+ """
+ Alias for `get_string` (obtains a bytestring).
+ """
+ return self.get_bytes(self.get_int())
+
+ def get_list(self):
+ """
+ Fetch a list of `strings <str>` from the stream.
+
+ These are trivially encoded as comma-separated values in a string.
+ """
+ return self.get_text().split(",")
+
+ def add_bytes(self, b):
+ """
+ Write bytes to the stream, without any formatting.
+
+ :param bytes b: bytes to add
+ """
+ self.packet.write(b)
+ return self
+
+ def add_byte(self, b):
+ """
+ Write a single byte to the stream, without any formatting.
+
+ :param bytes b: byte to add
+ """
+ self.packet.write(b)
+ return self
+
+ def add_boolean(self, b):
+ """
+ Add a boolean value to the stream.
+
+ :param bool b: boolean value to add
+ """
+ if b:
+ self.packet.write(one_byte)
+ else:
+ self.packet.write(zero_byte)
+ return self
+
+ def add_int(self, n):
+ """
+ Add an integer to the stream.
+
+ :param int n: integer to add
+ """
+ self.packet.write(struct.pack(">I", n))
+ return self
+
+ def add_adaptive_int(self, n):
+ """
+ Add an integer to the stream.
+
+ :param int n: integer to add
+ """
+ if n >= Message.big_int:
+ self.packet.write(max_byte)
+ self.add_string(util.deflate_long(n))
+ else:
+ self.packet.write(struct.pack(">I", n))
+ return self
+
+ def add_int64(self, n):
+ """
+ Add a 64-bit int to the stream.
+
+ :param int n: long int to add
+ """
+ self.packet.write(struct.pack(">Q", n))
+ return self
+
+ def add_mpint(self, z):
+ """
+ Add a long int to the stream, encoded as an infinite-precision
+ integer. This method only works on positive numbers.
+
+ :param int z: long int to add
+ """
+ self.add_string(util.deflate_long(z))
+ return self
+
+ # TODO: see the TODO for get_string/get_text/et al, this should change
+ # to match.
+ def add_string(self, s):
+ """
+ Add a bytestring to the stream.
+
+        :param bytes s: bytestring to add
+ """
+ s = util.asbytes(s)
+ self.add_int(len(s))
+ self.packet.write(s)
+ return self
+
+ def add_list(self, l): # noqa: E741
+ """
+ Add a list of strings to the stream. They are encoded identically to
+ a single string of values separated by commas. (Yes, really, that's
+ how SSH2 does it.)
+
+ :param l: list of strings to add
+ """
+ self.add_string(",".join(l))
+ return self
+
+ def _add(self, i):
+ if type(i) is bool:
+ return self.add_boolean(i)
+ elif isinstance(i, int):
+ return self.add_adaptive_int(i)
+ elif type(i) is list:
+ return self.add_list(i)
+ else:
+ return self.add_string(i)
+
+ # TODO: this would never have worked for unicode strings under Python 3,
+ # guessing nobody/nothing ever used it for that purpose?
+ def add(self, *seq):
+ """
+ Add a sequence of items to the stream. The values are encoded based
+ on their type: bytes, str, int, bool, or list.
+
+ .. warning::
+ Longs are encoded non-deterministically. Don't use this method.
+
+ :param seq: the sequence of items
+ """
+ for item in seq:
+ self._add(item)
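+
+
+# Editor's note: illustrative usage sketch, not upstream paramiko code. The
+# add_* and get_* methods are symmetric, which is exactly how the transport
+# and kex code build outgoing packets and parse incoming ones.
+if __name__ == "__main__":
+    m = Message()
+    m.add_byte(zero_byte)
+    m.add_boolean(True)
+    m.add_int(0xCAFE)
+    m.add_string(b"ssh-rsa")
+    m.add_mpint(0x1122334455667788990011223344556677)
+    m.add_list(["aes128-ctr", "aes256-ctr"])
+
+    parsed = Message(m.asbytes())
+    assert parsed.get_byte() == zero_byte
+    assert parsed.get_boolean() is True
+    assert parsed.get_int() == 0xCAFE
+    assert parsed.get_binary() == b"ssh-rsa"
+    assert parsed.get_mpint() == 0x1122334455667788990011223344556677
+    assert parsed.get_list() == ["aes128-ctr", "aes256-ctr"]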
diff --git a/paramiko/packet.py b/paramiko/packet.py
new file mode 100644
index 0000000..1274a23
--- /dev/null
+++ b/paramiko/packet.py
@@ -0,0 +1,649 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Packet handling
+"""
+
+import errno
+import os
+import socket
+import struct
+import threading
+import time
+from hmac import HMAC
+
+from paramiko import util
+from paramiko.common import (
+ linefeed_byte,
+ cr_byte_value,
+ MSG_NAMES,
+ DEBUG,
+ xffffffff,
+ zero_byte,
+ byte_ord,
+)
+from paramiko.util import u
+from paramiko.ssh_exception import SSHException, ProxyCommandFailure
+from paramiko.message import Message
+
+
+def compute_hmac(key, message, digest_class):
+ return HMAC(key, message, digest_class).digest()
+
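+# Editor's note (not upstream code): the MAC attached and verified by
+# send_message()/read_message() below is an HMAC over the packet sequence
+# number (a big-endian uint32) followed by the packet (unencrypted in the
+# classic, non-EtM mode), truncated to the negotiated MAC length. With a
+# made-up key it looks like:
+#
+#     from hashlib import sha1
+#     seqno, packet = 7, b"\x00" * 32
+#     mac = compute_hmac(b"demo key", struct.pack(">I", seqno) + packet, sha1)
+#     mac = mac[:12]  # e.g. hmac-sha1-96 keeps only the first 96 bits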
+
+class NeedRekeyException(Exception):
+ """
+ Exception indicating a rekey is needed.
+ """
+
+ pass
+
+
+def first_arg(e):
+ arg = None
+ if type(e.args) is tuple and len(e.args) > 0:
+ arg = e.args[0]
+ return arg
+
+
+class Packetizer:
+ """
+ Implementation of the base SSH packet protocol.
+ """
+
+    # READ the secsh RFCs before raising these values; if anything,
+ # they should probably be lower.
+ REKEY_PACKETS = pow(2, 29)
+ REKEY_BYTES = pow(2, 29)
+
+ # Allow receiving this many packets after a re-key request before
+ # terminating
+ REKEY_PACKETS_OVERFLOW_MAX = pow(2, 29)
+ # Allow receiving this many bytes after a re-key request before terminating
+ REKEY_BYTES_OVERFLOW_MAX = pow(2, 29)
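+    # Editor's note (not upstream code): 2**29 bytes is roughly 512 MiB and
+    # 2**29 packets is about half a billion packets; a re-key is requested as
+    # soon as either counter passes its limit, whichever happens first (see
+    # send_message / read_message below).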
+
+ def __init__(self, socket):
+ self.__socket = socket
+ self.__logger = None
+ self.__closed = False
+ self.__dump_packets = False
+ self.__need_rekey = False
+ self.__init_count = 0
+ self.__remainder = bytes()
+ self._initial_kex_done = False
+
+ # used for noticing when to re-key:
+ self.__sent_bytes = 0
+ self.__sent_packets = 0
+ self.__received_bytes = 0
+ self.__received_packets = 0
+ self.__received_bytes_overflow = 0
+ self.__received_packets_overflow = 0
+
+ # current inbound/outbound ciphering:
+ self.__block_size_out = 8
+ self.__block_size_in = 8
+ self.__mac_size_out = 0
+ self.__mac_size_in = 0
+ self.__block_engine_out = None
+ self.__block_engine_in = None
+ self.__sdctr_out = False
+ self.__mac_engine_out = None
+ self.__mac_engine_in = None
+ self.__mac_key_out = bytes()
+ self.__mac_key_in = bytes()
+ self.__compress_engine_out = None
+ self.__compress_engine_in = None
+ self.__sequence_number_out = 0
+ self.__sequence_number_in = 0
+ self.__etm_out = False
+ self.__etm_in = False
+
+ # lock around outbound writes (packet computation)
+ self.__write_lock = threading.RLock()
+
+ # keepalives:
+ self.__keepalive_interval = 0
+ self.__keepalive_last = time.time()
+ self.__keepalive_callback = None
+
+ self.__timer = None
+ self.__handshake_complete = False
+ self.__timer_expired = False
+
+ @property
+ def closed(self):
+ return self.__closed
+
+ def reset_seqno_out(self):
+ self.__sequence_number_out = 0
+
+ def reset_seqno_in(self):
+ self.__sequence_number_in = 0
+
+ def set_log(self, log):
+ """
+ Set the Python log object to use for logging.
+ """
+ self.__logger = log
+
+ def set_outbound_cipher(
+ self,
+ block_engine,
+ block_size,
+ mac_engine,
+ mac_size,
+ mac_key,
+ sdctr=False,
+ etm=False,
+ ):
+ """
+ Switch outbound data cipher.
+ :param etm: Set encrypt-then-mac from OpenSSH
+ """
+ self.__block_engine_out = block_engine
+ self.__sdctr_out = sdctr
+ self.__block_size_out = block_size
+ self.__mac_engine_out = mac_engine
+ self.__mac_size_out = mac_size
+ self.__mac_key_out = mac_key
+ self.__sent_bytes = 0
+ self.__sent_packets = 0
+ self.__etm_out = etm
+ # wait until the reset happens in both directions before clearing
+ # rekey flag
+ self.__init_count |= 1
+ if self.__init_count == 3:
+ self.__init_count = 0
+ self.__need_rekey = False
+
+ def set_inbound_cipher(
+ self,
+ block_engine,
+ block_size,
+ mac_engine,
+ mac_size,
+ mac_key,
+ etm=False,
+ ):
+ """
+ Switch inbound data cipher.
+ :param etm: Set encrypt-then-mac from OpenSSH
+ """
+ self.__block_engine_in = block_engine
+ self.__block_size_in = block_size
+ self.__mac_engine_in = mac_engine
+ self.__mac_size_in = mac_size
+ self.__mac_key_in = mac_key
+ self.__received_bytes = 0
+ self.__received_packets = 0
+ self.__received_bytes_overflow = 0
+ self.__received_packets_overflow = 0
+ self.__etm_in = etm
+ # wait until the reset happens in both directions before clearing
+ # rekey flag
+ self.__init_count |= 2
+ if self.__init_count == 3:
+ self.__init_count = 0
+ self.__need_rekey = False
+
+ def set_outbound_compressor(self, compressor):
+ self.__compress_engine_out = compressor
+
+ def set_inbound_compressor(self, compressor):
+ self.__compress_engine_in = compressor
+
+ def close(self):
+ self.__closed = True
+ self.__socket.close()
+
+ def set_hexdump(self, hexdump):
+ self.__dump_packets = hexdump
+
+ def get_hexdump(self):
+ return self.__dump_packets
+
+ def get_mac_size_in(self):
+ return self.__mac_size_in
+
+ def get_mac_size_out(self):
+ return self.__mac_size_out
+
+ def need_rekey(self):
+ """
+ Returns ``True`` if a new set of keys needs to be negotiated. This
+ will be triggered during a packet read or write, so it should be
+ checked after every read or write, or at least after every few.
+ """
+ return self.__need_rekey
+
+ def set_keepalive(self, interval, callback):
+ """
+ Turn on/off the callback keepalive. If ``interval`` seconds pass with
+ no data read from or written to the socket, the callback will be
+ executed and the timer will be reset.
+ """
+ self.__keepalive_interval = interval
+ self.__keepalive_callback = callback
+ self.__keepalive_last = time.time()
+
+ def read_timer(self):
+ self.__timer_expired = True
+
+ def start_handshake(self, timeout):
+ """
+ Tells `Packetizer` that the handshake process started.
+        Starts a bookkeeping timer that can signal a timeout in the
+        handshake process.
+
+        :param float timeout: number of seconds to wait before timing out
+ """
+ if not self.__timer:
+ self.__timer = threading.Timer(float(timeout), self.read_timer)
+ self.__timer.start()
+
+ def handshake_timed_out(self):
+ """
+ Checks if the handshake has timed out.
+
+ If `start_handshake` wasn't called before the call to this function,
+ the return value will always be `False`. If the handshake completed
+        before a timeout was reached, the return value will be `False`.
+
+ :return: handshake time out status, as a `bool`
+ """
+ if not self.__timer:
+ return False
+ if self.__handshake_complete:
+ return False
+ return self.__timer_expired
+
+ def complete_handshake(self):
+ """
+ Tells `Packetizer` that the handshake has completed.
+ """
+ if self.__timer:
+ self.__timer.cancel()
+ self.__timer_expired = False
+ self.__handshake_complete = True
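+
+    # Editor's note: illustrative usage sketch, not upstream code. The owning
+    # Transport drives this timer roughly like:
+    #
+    #     packetizer.start_handshake(15.0)   # hypothetical 15-second timeout
+    #     ...banner exchange, kex, auth...
+    #     if packetizer.handshake_timed_out():
+    #         raise SSHException("negotiation timed out")
+    #     packetizer.complete_handshake()    # cancels the timer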
+
+ def read_all(self, n, check_rekey=False):
+ """
+ Read as close to N bytes as possible, blocking as long as necessary.
+
+ :param int n: number of bytes to read
+        :return: the data read, as `bytes`
+
+ :raises:
+ ``EOFError`` -- if the socket was closed before all the bytes could
+ be read
+ """
+ out = bytes()
+ # handle over-reading from reading the banner line
+ if len(self.__remainder) > 0:
+ out = self.__remainder[:n]
+ self.__remainder = self.__remainder[n:]
+ n -= len(out)
+ while n > 0:
+ got_timeout = False
+ if self.handshake_timed_out():
+ raise EOFError()
+ try:
+ x = self.__socket.recv(n)
+ if len(x) == 0:
+ raise EOFError()
+ out += x
+ n -= len(x)
+ except socket.timeout:
+ got_timeout = True
+ except socket.error as e:
+ # on Linux, sometimes instead of socket.timeout, we get
+ # EAGAIN. this is a bug in recent (> 2.6.9) kernels but
+ # we need to work around it.
+ arg = first_arg(e)
+ if arg == errno.EAGAIN:
+ got_timeout = True
+ elif self.__closed:
+ raise EOFError()
+ else:
+ raise
+ if got_timeout:
+ if self.__closed:
+ raise EOFError()
+ if check_rekey and (len(out) == 0) and self.__need_rekey:
+ raise NeedRekeyException()
+ self._check_keepalive()
+ return out
+
+ def write_all(self, out):
+ self.__keepalive_last = time.time()
+ iteration_with_zero_as_return_value = 0
+ while len(out) > 0:
+ retry_write = False
+ try:
+ n = self.__socket.send(out)
+ except socket.timeout:
+ retry_write = True
+ except socket.error as e:
+ arg = first_arg(e)
+ if arg == errno.EAGAIN:
+ retry_write = True
+ else:
+ n = -1
+ except ProxyCommandFailure:
+ raise # so it doesn't get swallowed by the below catchall
+ except Exception:
+ # could be: (32, 'Broken pipe')
+ n = -1
+ if retry_write:
+ n = 0
+ if self.__closed:
+ n = -1
+ else:
+ if n == 0 and iteration_with_zero_as_return_value > 10:
+                    # We shouldn't retry the write, but we didn't manage to
+                    # send anything over the socket. This might indicate that
+                    # we have lost contact with the remote side, but have yet
+                    # to receive an EOFError or other socket error. Give it a
+                    # few iterations to try to catch up.
+ n = -1
+ iteration_with_zero_as_return_value += 1
+ if n < 0:
+ raise EOFError()
+ if n == len(out):
+ break
+ out = out[n:]
+ return
+
+ def readline(self, timeout):
+ """
+ Read a line from the socket. We assume no data is pending after the
+ line, so it's okay to attempt large reads.
+ """
+ buf = self.__remainder
+ while linefeed_byte not in buf:
+ buf += self._read_timeout(timeout)
+ n = buf.index(linefeed_byte)
+ self.__remainder = buf[n + 1 :]
+ buf = buf[:n]
+ if (len(buf) > 0) and (buf[-1] == cr_byte_value):
+ buf = buf[:-1]
+ return u(buf)
+
+ def send_message(self, data):
+ """
+ Write a block of data using the current cipher, as an SSH block.
+ """
+ # encrypt this sucka
+ data = data.asbytes()
+ cmd = byte_ord(data[0])
+ if cmd in MSG_NAMES:
+ cmd_name = MSG_NAMES[cmd]
+ else:
+ cmd_name = "${:x}".format(cmd)
+ orig_len = len(data)
+ self.__write_lock.acquire()
+ try:
+ if self.__compress_engine_out is not None:
+ data = self.__compress_engine_out(data)
+ packet = self._build_packet(data)
+ if self.__dump_packets:
+ self._log(
+ DEBUG,
+ "Write packet <{}>, length {}".format(cmd_name, orig_len),
+ )
+ self._log(DEBUG, util.format_binary(packet, "OUT: "))
+ if self.__block_engine_out is not None:
+ if self.__etm_out:
+ # packet length is not encrypted in EtM
+ out = packet[0:4] + self.__block_engine_out.update(
+ packet[4:]
+ )
+ else:
+ out = self.__block_engine_out.update(packet)
+ else:
+ out = packet
+ # + mac
+ if self.__block_engine_out is not None:
+ packed = struct.pack(">I", self.__sequence_number_out)
+ payload = packed + (out if self.__etm_out else packet)
+ out += compute_hmac(
+ self.__mac_key_out, payload, self.__mac_engine_out
+ )[: self.__mac_size_out]
+ next_seq = (self.__sequence_number_out + 1) & xffffffff
+ if next_seq == 0 and not self._initial_kex_done:
+ raise SSHException(
+ "Sequence number rolled over during initial kex!"
+ )
+ self.__sequence_number_out = next_seq
+ self.write_all(out)
+
+ self.__sent_bytes += len(out)
+ self.__sent_packets += 1
+ sent_too_much = (
+ self.__sent_packets >= self.REKEY_PACKETS
+ or self.__sent_bytes >= self.REKEY_BYTES
+ )
+ if sent_too_much and not self.__need_rekey:
+ # only ask once for rekeying
+ msg = "Rekeying (hit {} packets, {} bytes sent)"
+ self._log(
+ DEBUG, msg.format(self.__sent_packets, self.__sent_bytes)
+ )
+ self.__received_bytes_overflow = 0
+ self.__received_packets_overflow = 0
+ self._trigger_rekey()
+ finally:
+ self.__write_lock.release()
+
+ def read_message(self):
+ """
+ Only one thread should ever be in this function (no other locking is
+ done).
+
+ :raises: `.SSHException` -- if the packet is mangled
+ :raises: `.NeedRekeyException` -- if the transport should rekey
+ """
+ header = self.read_all(self.__block_size_in, check_rekey=True)
+ if self.__etm_in:
+ packet_size = struct.unpack(">I", header[:4])[0]
+ remaining = packet_size - self.__block_size_in + 4
+ packet = header[4:] + self.read_all(remaining, check_rekey=False)
+ mac = self.read_all(self.__mac_size_in, check_rekey=False)
+ mac_payload = (
+ struct.pack(">II", self.__sequence_number_in, packet_size)
+ + packet
+ )
+ my_mac = compute_hmac(
+ self.__mac_key_in, mac_payload, self.__mac_engine_in
+ )[: self.__mac_size_in]
+ if not util.constant_time_bytes_eq(my_mac, mac):
+ raise SSHException("Mismatched MAC")
+ header = packet
+
+ if self.__block_engine_in is not None:
+ header = self.__block_engine_in.update(header)
+ if self.__dump_packets:
+ self._log(DEBUG, util.format_binary(header, "IN: "))
+
+ # When ETM is in play, we've already read the packet size & decrypted
+ # everything, so just set the packet back to the header we obtained.
+ if self.__etm_in:
+ packet = header
+ # Otherwise, use the older non-ETM logic
+ else:
+ packet_size = struct.unpack(">I", header[:4])[0]
+
+ # leftover contains decrypted bytes from the first block (after the
+ # length field)
+ leftover = header[4:]
+ if (packet_size - len(leftover)) % self.__block_size_in != 0:
+ raise SSHException("Invalid packet blocking")
+ buf = self.read_all(
+ packet_size + self.__mac_size_in - len(leftover)
+ )
+ packet = buf[: packet_size - len(leftover)]
+ post_packet = buf[packet_size - len(leftover) :]
+
+ if self.__block_engine_in is not None:
+ packet = self.__block_engine_in.update(packet)
+ packet = leftover + packet
+
+ if self.__dump_packets:
+ self._log(DEBUG, util.format_binary(packet, "IN: "))
+
+ if self.__mac_size_in > 0 and not self.__etm_in:
+ mac = post_packet[: self.__mac_size_in]
+ mac_payload = (
+ struct.pack(">II", self.__sequence_number_in, packet_size)
+ + packet
+ )
+ my_mac = compute_hmac(
+ self.__mac_key_in, mac_payload, self.__mac_engine_in
+ )[: self.__mac_size_in]
+ if not util.constant_time_bytes_eq(my_mac, mac):
+ raise SSHException("Mismatched MAC")
+ padding = byte_ord(packet[0])
+ payload = packet[1 : packet_size - padding]
+
+ if self.__dump_packets:
+ self._log(
+ DEBUG,
+ "Got payload ({} bytes, {} padding)".format(
+ packet_size, padding
+ ),
+ )
+
+ if self.__compress_engine_in is not None:
+ payload = self.__compress_engine_in(payload)
+
+ msg = Message(payload[1:])
+ msg.seqno = self.__sequence_number_in
+ next_seq = (self.__sequence_number_in + 1) & xffffffff
+ if next_seq == 0 and not self._initial_kex_done:
+ raise SSHException(
+ "Sequence number rolled over during initial kex!"
+ )
+ self.__sequence_number_in = next_seq
+
+ # check for rekey
+ raw_packet_size = packet_size + self.__mac_size_in + 4
+ self.__received_bytes += raw_packet_size
+ self.__received_packets += 1
+ if self.__need_rekey:
+ # we've asked to rekey -- give them some packets to comply before
+ # dropping the connection
+ self.__received_bytes_overflow += raw_packet_size
+ self.__received_packets_overflow += 1
+ if (
+ self.__received_packets_overflow
+ >= self.REKEY_PACKETS_OVERFLOW_MAX
+ ) or (
+ self.__received_bytes_overflow >= self.REKEY_BYTES_OVERFLOW_MAX
+ ):
+ raise SSHException(
+ "Remote transport is ignoring rekey requests"
+ )
+ elif (self.__received_packets >= self.REKEY_PACKETS) or (
+ self.__received_bytes >= self.REKEY_BYTES
+ ):
+ # only ask once for rekeying
+ err = "Rekeying (hit {} packets, {} bytes received)"
+ self._log(
+ DEBUG,
+ err.format(self.__received_packets, self.__received_bytes),
+ )
+ self.__received_bytes_overflow = 0
+ self.__received_packets_overflow = 0
+ self._trigger_rekey()
+
+ cmd = byte_ord(payload[0])
+ if cmd in MSG_NAMES:
+ cmd_name = MSG_NAMES[cmd]
+ else:
+ cmd_name = "${:x}".format(cmd)
+ if self.__dump_packets:
+ self._log(
+ DEBUG,
+ "Read packet <{}>, length {}".format(cmd_name, len(payload)),
+ )
+ return cmd, msg
+
+ # ...protected...
+
+ def _log(self, level, msg):
+ if self.__logger is None:
+ return
+ if issubclass(type(msg), list):
+ for m in msg:
+ self.__logger.log(level, m)
+ else:
+ self.__logger.log(level, msg)
+
+ def _check_keepalive(self):
+ if (
+ not self.__keepalive_interval
+ or not self.__block_engine_out
+ or self.__need_rekey
+ ):
+ # wait till we're encrypting, and not in the middle of rekeying
+ return
+ now = time.time()
+ if now > self.__keepalive_last + self.__keepalive_interval:
+ self.__keepalive_callback()
+ self.__keepalive_last = now
+
+ def _read_timeout(self, timeout):
+ start = time.time()
+ while True:
+ try:
+ x = self.__socket.recv(128)
+ if len(x) == 0:
+ raise EOFError()
+ break
+ except socket.timeout:
+ pass
+ if self.__closed:
+ raise EOFError()
+ now = time.time()
+ if now - start >= timeout:
+ raise socket.timeout()
+ return x
+
+ def _build_packet(self, payload):
+ # pad up at least 4 bytes, to nearest block-size (usually 8)
+ bsize = self.__block_size_out
+ # do not include payload length in computations for padding in EtM mode
+ # (payload length won't be encrypted)
+ addlen = 4 if self.__etm_out else 8
+ padding = 3 + bsize - ((len(payload) + addlen) % bsize)
+ packet = struct.pack(">IB", len(payload) + padding + 1, padding)
+ packet += payload
+ if self.__sdctr_out or self.__block_engine_out is None:
+ # cute trick i caught openssh doing: if we're not encrypting, or are
+ # using SDCTR mode (RFC 4344),
+ # don't waste random bytes for the padding
+ packet += zero_byte * padding
+ else:
+ packet += os.urandom(padding)
+ return packet
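+
+ # Worked example (illustrative only): with a 16-byte block cipher and
+ # no EtM (so addlen == 8), a 28-byte payload gives
+ #   padding = 3 + 16 - ((28 + 8) % 16) = 15
+ # and the packet is 4 (length) + 1 (pad count) + 28 + 15 = 48 bytes, an
+ # exact multiple of the block size; padding always falls in the
+ # 4..(3 + bsize) range required by RFC 4253.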
+
+ def _trigger_rekey(self):
+ # outside code should check for this flag
+ self.__need_rekey = True
diff --git a/paramiko/pipe.py b/paramiko/pipe.py
new file mode 100644
index 0000000..65944fa
--- /dev/null
+++ b/paramiko/pipe.py
@@ -0,0 +1,148 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Abstraction of a one-way pipe where the read end can be used in
+`select.select`. Normally this is trivial, but Windows makes it nearly
+impossible.
+
+The pipe acts like an Event, which can be set or cleared. When set, the pipe
+will trigger as readable in `select <select.select>`.
+"""
+
+import sys
+import os
+import socket
+
+
+def make_pipe():
+ if sys.platform[:3] != "win":
+ p = PosixPipe()
+ else:
+ p = WindowsPipe()
+ return p
+
+
+class PosixPipe:
+ def __init__(self):
+ self._rfd, self._wfd = os.pipe()
+ self._set = False
+ self._forever = False
+ self._closed = False
+
+ def close(self):
+ os.close(self._rfd)
+ os.close(self._wfd)
+ # used for unit tests:
+ self._closed = True
+
+ def fileno(self):
+ return self._rfd
+
+ def clear(self):
+ if not self._set or self._forever:
+ return
+ os.read(self._rfd, 1)
+ self._set = False
+
+ def set(self):
+ if self._set or self._closed:
+ return
+ self._set = True
+ os.write(self._wfd, b"*")
+
+ def set_forever(self):
+ self._forever = True
+ self.set()
+
+
+class WindowsPipe:
+ """
+ On Windows, only an OS-level "WinSock" may be used in select(), but reads
+ and writes must be to the actual socket object.
+ """
+
+ def __init__(self):
+ serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ serv.bind(("127.0.0.1", 0))
+ serv.listen(1)
+
+ # need to save sockets in _rsock/_wsock so they don't get closed
+ self._rsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self._rsock.connect(("127.0.0.1", serv.getsockname()[1]))
+
+ self._wsock, addr = serv.accept()
+ serv.close()
+ self._set = False
+ self._forever = False
+ self._closed = False
+
+ def close(self):
+ self._rsock.close()
+ self._wsock.close()
+ # used for unit tests:
+ self._closed = True
+
+ def fileno(self):
+ return self._rsock.fileno()
+
+ def clear(self):
+ if not self._set or self._forever:
+ return
+ self._rsock.recv(1)
+ self._set = False
+
+ def set(self):
+ if self._set or self._closed:
+ return
+ self._set = True
+ self._wsock.send(b"*")
+
+ def set_forever(self):
+ self._forever = True
+ self.set()
+
+
+class OrPipe:
+ def __init__(self, pipe):
+ self._set = False
+ self._partner = None
+ self._pipe = pipe
+
+ def set(self):
+ self._set = True
+ if not self._partner._set:
+ self._pipe.set()
+
+ def clear(self):
+ self._set = False
+ if not self._partner._set:
+ self._pipe.clear()
+
+
+def make_or_pipe(pipe):
+ """
+ wraps a pipe into two pipe-like objects which are "or"d together to
+ affect the real pipe. if either returned pipe is set, the wrapped pipe
+ is set. when both are cleared, the wrapped pipe is cleared.
+ """
+ p1 = OrPipe(pipe)
+ p2 = OrPipe(pipe)
+ p1._partner = p2
+ p2._partner = p1
+ return p1, p2
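+
+
+# A minimal usage sketch (illustrative only, not part of upstream
+# paramiko): the "or" pipe lets two producers share one selectable pipe.
+# Only the helpers defined above plus the standard library are assumed.
+#
+#   import select
+#   from paramiko.pipe import make_pipe, make_or_pipe
+#
+#   pipe = make_pipe()
+#   a, b = make_or_pipe(pipe)
+#   a.set()                       # either half marks the pipe readable
+#   r, _, _ = select.select([pipe], [], [], 0)
+#   assert r == [pipe]
+#   a.clear()                     # b is still clear, so the pipe clears too
+#   r, _, _ = select.select([pipe], [], [], 0)
+#   assert r == []
+#   pipe.close()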
diff --git a/paramiko/pkey.py b/paramiko/pkey.py
new file mode 100644
index 0000000..ef37100
--- /dev/null
+++ b/paramiko/pkey.py
@@ -0,0 +1,938 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Common API for all public keys.
+"""
+
+import base64
+from base64 import encodebytes, decodebytes
+from binascii import unhexlify
+import os
+from pathlib import Path
+from hashlib import md5, sha256
+import re
+import struct
+
+import bcrypt
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.ciphers import algorithms, modes, Cipher
+from cryptography.hazmat.primitives import asymmetric
+
+from paramiko import util
+from paramiko.util import u, b
+from paramiko.common import o600
+from paramiko.ssh_exception import SSHException, PasswordRequiredException
+from paramiko.message import Message
+
+
+OPENSSH_AUTH_MAGIC = b"openssh-key-v1\x00"
+
+
+def _unpad_openssh(data):
+ # At the moment, this is only used for unpadding private keys on disk. This
+ # really ought to be made constant time (possibly by upstreaming this logic
+ # into pyca/cryptography).
+ padding_length = data[-1]
+ if 0x20 <= padding_length < 0x7F:
+ return data # no padding, last byte part comment (printable ascii)
+ if padding_length > 15:
+ raise SSHException("Invalid key")
+ for i in range(padding_length):
+ if data[i - padding_length] != i + 1:
+ raise SSHException("Invalid key")
+ return data[:-padding_length]
+
+
+class UnknownKeyType(Exception):
+ """
+ An attempt was made to read a public/private key of an unknown algorithm.
+ """
+
+ def __init__(self, key_type=None, key_bytes=None):
+ self.key_type = key_type
+ self.key_bytes = key_bytes
+
+ def __str__(self):
+ return f"UnknownKeyType(type={self.key_type!r}, bytes=<{len(self.key_bytes)}>)" # noqa
+
+
+class PKey:
+ """
+ Base class for public keys.
+
+ Also includes some "meta" level convenience constructors such as
+ `.from_type_string`.
+ """
+
+ # known encryption types for private key files:
+ _CIPHER_TABLE = {
+ "AES-128-CBC": {
+ "cipher": algorithms.AES,
+ "keysize": 16,
+ "blocksize": 16,
+ "mode": modes.CBC,
+ },
+ "AES-256-CBC": {
+ "cipher": algorithms.AES,
+ "keysize": 32,
+ "blocksize": 16,
+ "mode": modes.CBC,
+ },
+ "DES-EDE3-CBC": {
+ "cipher": algorithms.TripleDES,
+ "keysize": 24,
+ "blocksize": 8,
+ "mode": modes.CBC,
+ },
+ }
+ _PRIVATE_KEY_FORMAT_ORIGINAL = 1
+ _PRIVATE_KEY_FORMAT_OPENSSH = 2
+ BEGIN_TAG = re.compile(
+ r"^-{5}BEGIN (RSA|DSA|EC|OPENSSH) PRIVATE KEY-{5}\s*$"
+ )
+ END_TAG = re.compile(r"^-{5}END (RSA|DSA|EC|OPENSSH) PRIVATE KEY-{5}\s*$")
+
+ @staticmethod
+ def from_path(path, passphrase=None):
+ """
+ Attempt to instantiate appropriate key subclass from given file path.
+
+ :param Path path: The path to load (may also be a `str`).
+
+ :returns:
+ A `PKey` subclass instance.
+
+ :raises:
+ `UnknownKeyType`, if our crypto backend doesn't know this key type.
+
+ .. versionadded:: 3.2
+ """
+ # TODO: make sure sphinx is reading Path right in param list...
+
+ # Lazy import to avoid circular import issues
+ from paramiko import DSSKey, RSAKey, Ed25519Key, ECDSAKey
+
+ # Normalize to string, as cert suffix isn't quite an extension, so
+ # pathlib isn't useful for this.
+ path = str(path)
+
+ # Sort out cert vs key, i.e. it is 'legal' to hand this kind of API
+ # /either/ the key /or/ the cert, when there is a key/cert pair.
+ cert_suffix = "-cert.pub"
+ if str(path).endswith(cert_suffix):
+ key_path = path[: -len(cert_suffix)]
+ cert_path = path
+ else:
+ key_path = path
+ cert_path = path + cert_suffix
+
+ key_path = Path(key_path).expanduser()
+ cert_path = Path(cert_path).expanduser()
+
+ data = key_path.read_bytes()
+ # Like OpenSSH, try modern/OpenSSH-specific key load first
+ try:
+ loaded = serialization.load_ssh_private_key(
+ data=data, password=passphrase
+ )
+ # Then fall back to assuming legacy PEM type
+ except ValueError:
+ loaded = serialization.load_pem_private_key(
+ data=data, password=passphrase
+ )
+ # TODO Python 3.10: match statement? (NOTE: we cannot use a dict
+ # because the results from the loader are literal backend, eg openssl,
+ # private classes, so isinstance tests work but exact 'x class is y'
+ # tests will not work)
+ # TODO: leverage already-parsed/math'd obj to avoid duplicate cpu
+ # cycles? seemingly requires most of our key subclasses to be rewritten
+ # to be cryptography-object-forward. this is still likely faster than
+ # the old SSHClient code that just tried instantiating every class!
+ key_class = None
+ if isinstance(loaded, asymmetric.dsa.DSAPrivateKey):
+ key_class = DSSKey
+ elif isinstance(loaded, asymmetric.rsa.RSAPrivateKey):
+ key_class = RSAKey
+ elif isinstance(loaded, asymmetric.ed25519.Ed25519PrivateKey):
+ key_class = Ed25519Key
+ elif isinstance(loaded, asymmetric.ec.EllipticCurvePrivateKey):
+ key_class = ECDSAKey
+ else:
+ raise UnknownKeyType(key_bytes=data, key_type=loaded.__class__)
+ with key_path.open() as fd:
+ key = key_class.from_private_key(fd, password=passphrase)
+ if cert_path.exists():
+ # load_certificate can take Message, path-str, or value-str
+ key.load_certificate(str(cert_path))
+ return key
+
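+ # Usage sketch (illustrative only, not part of upstream paramiko): hand
+ # `from_path` either the private key or its matching ``-cert.pub`` and
+ # get back the right subclass, with any certificate auto-loaded. The
+ # path below is hypothetical.
+ #
+ #   from paramiko import PKey
+ #   key = PKey.from_path("~/.ssh/id_ed25519", passphrase=None)
+ #   print(type(key).__name__, key.fingerprint)
+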
+ @staticmethod
+ def from_type_string(key_type, key_bytes):
+ """
+ Given type `str` & raw `bytes`, return a `PKey` subclass instance.
+
+ For example, ``PKey.from_type_string("ssh-ed25519", <public bytes>)``
+ will (if successful) return a new `.Ed25519Key`.
+
+ :param str key_type:
+ The key type, eg ``"ssh-ed25519"``.
+ :param bytes key_bytes:
+ The raw byte data forming the key material, as expected by
+ subclasses' ``data`` parameter.
+
+ :returns:
+ A `PKey` subclass instance.
+
+ :raises:
+ `UnknownKeyType`, if no registered classes knew about this type.
+
+ .. versionadded:: 3.2
+ """
+ from paramiko import key_classes
+
+ for key_class in key_classes:
+ if key_type in key_class.identifiers():
+ # TODO: needs to passthru things like passphrase
+ return key_class(data=key_bytes)
+ raise UnknownKeyType(key_type=key_type, key_bytes=key_bytes)
+
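+ # Usage sketch (illustrative only, not part of upstream paramiko):
+ # `from_type_string` fits naturally when you already hold the two fields
+ # of an ``authorized_keys``-style line; the sample line is made up.
+ #
+ #   from base64 import b64decode
+ #   from paramiko import PKey
+ #
+ #   line = "ssh-ed25519 AAAA... user@host"
+ #   key_type, blob_b64 = line.split()[:2]
+ #   key = PKey.from_type_string(key_type, b64decode(blob_b64))
+ #   assert key.get_name() == key_type
+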
+ @classmethod
+ def identifiers(cls):
+ """
+ returns an iterable of key format/name strings this class can handle.
+
+ Most classes only have a single identifier, and thus this default
+ implementation suffices; see `.ECDSAKey` for one example of an
+ override.
+ """
+ return [cls.name]
+
+ # TODO 4.0: make this and subclasses consistent, some of our own
+ # classmethods even assume kwargs we don't define!
+ # TODO 4.0: prob also raise NotImplementedError instead of pass'ing; the
+ # contract is pretty obviously that you need to handle msg/data/filename
+ # appropriately. (If 'pass' is a concession to testing, see about doing the
+ # work to fix the tests instead)
+ def __init__(self, msg=None, data=None):
+ """
+ Create a new instance of this public key type. If ``msg`` is given,
+ the key's public part(s) will be filled in from the message. If
+ ``data`` is given, the key's public part(s) will be filled in from
+ the string.
+
+ :param .Message msg:
+ an optional SSH `.Message` containing a public key of this type.
+ :param bytes data:
+ optional, the bytes of a public key of this type
+
+ :raises: `.SSHException` --
+ if a key cannot be created from the ``data`` or ``msg`` given, or
+ no key was passed in.
+ """
+ pass
+
+ # TODO: arguably this might want to be __str__ instead? ehh
+ # TODO: ditto the interplay between showing class name (currently we just
+ # say PKey writ large) and algorithm (usually == class name, but not
+ # always, also sometimes shows certificate-ness)
+ # TODO: if we do change it, we also want to tweak eg AgentKey, as it
+ # currently displays agent-ness with a suffix
+ def __repr__(self):
+ comment = ""
+ # Works for AgentKey, may work for others?
+ if hasattr(self, "comment") and self.comment:
+ comment = f", comment={self.comment!r}"
+ return f"PKey(alg={self.algorithm_name}, bits={self.get_bits()}, fp={self.fingerprint}{comment})" # noqa
+
+ # TODO 4.0: just merge into __bytes__ (everywhere)
+ def asbytes(self):
+ """
+ Return a byte string of an SSH `.Message` made up of the public part(s)
+ of this key. This string is suitable for passing to `__init__` to
+ re-create the key object later.
+ """
+ return bytes()
+
+ def __bytes__(self):
+ return self.asbytes()
+
+ def __eq__(self, other):
+ return isinstance(other, PKey) and self._fields == other._fields
+
+ def __hash__(self):
+ return hash(self._fields)
+
+ @property
+ def _fields(self):
+ raise NotImplementedError
+
+ def get_name(self):
+ """
+ Return the name of this private key implementation.
+
+ :return:
+ name of this private key type, in SSH terminology, as a `str` (for
+ example, ``"ssh-rsa"``).
+ """
+ return ""
+
+ @property
+ def algorithm_name(self):
+ """
+ Return the key algorithm identifier for this key.
+
+ Similar to `get_name`, but aimed at pure algorithm name instead of SSH
+ protocol field value.
+ """
+ # Nuke the leading 'ssh-'
+ # TODO in Python 3.9: use .removeprefix()
+ name = self.get_name().replace("ssh-", "")
+ # Trim any cert suffix (but leave the -cert, as OpenSSH does)
+ cert_tail = "-cert-v01@openssh.com"
+ if cert_tail in name:
+ name = name.replace(cert_tail, "-cert")
+ # Nuke any eg ECDSA suffix, OpenSSH does basically this too.
+ else:
+ name = name.split("-")[0]
+ return name.upper()
+
+ def get_bits(self):
+ """
+ Return the number of significant bits in this key. This is useful
+ for judging the relative security of a key.
+
+ :return: bits in the key (as an `int`)
+ """
+ # TODO 4.0: raise NotImplementedError, 0 is unlikely to ever be
+ # _correct_ and nothing in the critical path seems to use this.
+ return 0
+
+ def can_sign(self):
+ """
+ Return ``True`` if this key has the private part necessary for signing
+ data.
+ """
+ return False
+
+ def get_fingerprint(self):
+ """
+ Return an MD5 fingerprint of the public part of this key. Nothing
+ secret is revealed.
+
+ :return:
+ a 16-byte `bytes` object (binary) of the MD5 fingerprint, in SSH
+ format.
+ """
+ return md5(self.asbytes()).digest()
+
+ @property
+ def fingerprint(self):
+ """
+ Modern fingerprint property designed to be comparable to OpenSSH.
+
+ Currently only does SHA256 (the OpenSSH default).
+
+ .. versionadded:: 3.2
+ """
+ hashy = sha256(bytes(self))
+ hash_name = hashy.name.upper()
+ b64ed = encodebytes(hashy.digest())
+ cleaned = u(b64ed).strip().rstrip("=") # yes, OpenSSH does this too!
+ return f"{hash_name}:{cleaned}"
+
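+ # Note (illustrative only): the property above yields the same
+ # "SHA256:<unpadded base64>" form that ``ssh-keygen -lf`` prints for the
+ # corresponding public key, so the two can be compared directly, e.g.:
+ #
+ #   key = PKey.from_path("~/.ssh/id_ed25519")   # hypothetical path
+ #   print(key.fingerprint)                      # e.g. "SHA256:Nh6kq..."
+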
+ def get_base64(self):
+ """
+ Return a base64 string containing the public part of this key. Nothing
+ secret is revealed. This format is compatible with that used to store
+ public key files or recognized host keys.
+
+ :return: a base64 `string <str>` containing the public part of the key.
+ """
+ return u(encodebytes(self.asbytes())).replace("\n", "")
+
+ def sign_ssh_data(self, data, algorithm=None):
+ """
+ Sign a blob of data with this private key, and return a `.Message`
+ representing an SSH signature message.
+
+ :param bytes data:
+ the data to sign.
+ :param str algorithm:
+ the signature algorithm to use, if different from the key's
+ internal name. Default: ``None``.
+ :return: an SSH signature `message <.Message>`.
+
+ .. versionchanged:: 2.9
+ Added the ``algorithm`` kwarg.
+ """
+ return bytes()
+
+ def verify_ssh_sig(self, data, msg):
+ """
+ Given a blob of data, and an SSH message representing a signature of
+ that data, verify that it was signed with this key.
+
+ :param bytes data: the data that was signed.
+ :param .Message msg: an SSH signature message
+ :return:
+ ``True`` if the signature verifies correctly; ``False`` otherwise.
+ """
+ return False
+
+ @classmethod
+ def from_private_key_file(cls, filename, password=None):
+ """
+ Create a key object by reading a private key file. If the private
+ key is encrypted and ``password`` is not ``None``, the given password
+ will be used to decrypt the key (otherwise `.PasswordRequiredException`
+ is thrown). Through the magic of Python, this factory method will
+ exist in all subclasses of PKey (such as `.RSAKey` or `.DSSKey`), but
+ is useless on the abstract PKey class.
+
+ :param str filename: name of the file to read
+ :param str password:
+ an optional password to use to decrypt the key file, if it's
+ encrypted
+ :return: a new `.PKey` based on the given private key
+
+ :raises: ``IOError`` -- if there was an error reading the file
+ :raises: `.PasswordRequiredException` -- if the private key file is
+ encrypted, and ``password`` is ``None``
+ :raises: `.SSHException` -- if the key file is invalid
+ """
+ key = cls(filename=filename, password=password)
+ return key
+
+ @classmethod
+ def from_private_key(cls, file_obj, password=None):
+ """
+ Create a key object by reading a private key from a file (or file-like)
+ object. If the private key is encrypted and ``password`` is not
+ ``None``, the given password will be used to decrypt the key (otherwise
+ `.PasswordRequiredException` is thrown).
+
+ :param file_obj: the file-like object to read from
+ :param str password:
+ an optional password to use to decrypt the key, if it's encrypted
+ :return: a new `.PKey` based on the given private key
+
+ :raises: ``IOError`` -- if there was an error reading the key
+ :raises: `.PasswordRequiredException` --
+ if the private key file is encrypted, and ``password`` is ``None``
+ :raises: `.SSHException` -- if the key file is invalid
+ """
+ key = cls(file_obj=file_obj, password=password)
+ return key
+
+ def write_private_key_file(self, filename, password=None):
+ """
+ Write private key contents into a file. If the password is not
+ ``None``, the key is encrypted before writing.
+
+ :param str filename: name of the file to write
+ :param str password:
+ an optional password to use to encrypt the key file
+
+ :raises: ``IOError`` -- if there was an error writing the file
+ :raises: `.SSHException` -- if the key is invalid
+ """
+ raise Exception("Not implemented in PKey")
+
+ def write_private_key(self, file_obj, password=None):
+ """
+ Write private key contents into a file (or file-like) object. If the
+ password is not ``None``, the key is encrypted before writing.
+
+ :param file_obj: the file-like object to write into
+ :param str password: an optional password to use to encrypt the key
+
+ :raises: ``IOError`` -- if there was an error writing to the file
+ :raises: `.SSHException` -- if the key is invalid
+ """
+ # TODO 4.0: NotImplementedError (plus everywhere else in here)
+ raise Exception("Not implemented in PKey")
+
+ def _read_private_key_file(self, tag, filename, password=None):
+ """
+ Read an SSH2-format private key file, looking for a string of the type
+ ``"BEGIN xxx PRIVATE KEY"`` for some ``xxx``, base64-decode the text we
+ find, and return it as a string. If the private key is encrypted and
+ ``password`` is not ``None``, the given password will be used to
+ decrypt the key (otherwise `.PasswordRequiredException` is thrown).
+
+ :param str tag: ``"RSA"`` or ``"DSA"``, the tag used to mark the
+ data block.
+ :param str filename: name of the file to read.
+ :param str password:
+ an optional password to use to decrypt the key file, if it's
+ encrypted.
+ :return: the `bytes` that make up the private key.
+
+ :raises: ``IOError`` -- if there was an error reading the file.
+ :raises: `.PasswordRequiredException` -- if the private key file is
+ encrypted, and ``password`` is ``None``.
+ :raises: `.SSHException` -- if the key file is invalid.
+ """
+ with open(filename, "r") as f:
+ data = self._read_private_key(tag, f, password)
+ return data
+
+ def _read_private_key(self, tag, f, password=None):
+ lines = f.readlines()
+ if not lines:
+ raise SSHException("no lines in {} private key file".format(tag))
+
+ # find the BEGIN tag
+ start = 0
+ m = self.BEGIN_TAG.match(lines[start])
+ line_range = len(lines) - 1
+ while start < line_range and not m:
+ start += 1
+ m = self.BEGIN_TAG.match(lines[start])
+ start += 1
+ keytype = m.group(1) if m else None
+ if start >= len(lines) or keytype is None:
+ raise SSHException("not a valid {} private key file".format(tag))
+
+ # find the END tag
+ end = start
+ m = self.END_TAG.match(lines[end])
+ while end < line_range and not m:
+ end += 1
+ m = self.END_TAG.match(lines[end])
+
+ if keytype == tag:
+ data = self._read_private_key_pem(lines, end, password)
+ pkformat = self._PRIVATE_KEY_FORMAT_ORIGINAL
+ elif keytype == "OPENSSH":
+ data = self._read_private_key_openssh(lines[start:end], password)
+ pkformat = self._PRIVATE_KEY_FORMAT_OPENSSH
+ else:
+ raise SSHException(
+ "encountered {} key, expected {} key".format(keytype, tag)
+ )
+
+ return pkformat, data
+
+ def _got_bad_key_format_id(self, id_):
+ err = "{}._read_private_key() spat out an unknown key format id '{}'"
+ raise SSHException(err.format(self.__class__.__name__, id_))
+
+ def _read_private_key_pem(self, lines, end, password):
+ start = 0
+ # parse any headers first
+ headers = {}
+ start += 1
+ while start < len(lines):
+ line = lines[start].split(": ")
+ if len(line) == 1:
+ break
+ headers[line[0].lower()] = line[1].strip()
+ start += 1
+ # if we trudged to the end of the file, just try to cope.
+ try:
+ data = decodebytes(b("".join(lines[start:end])))
+ except base64.binascii.Error as e:
+ raise SSHException("base64 decoding error: {}".format(e))
+ if "proc-type" not in headers:
+ # unencrypted: done
+ return data
+ # encrypted keyfile: will need a password
+ proc_type = headers["proc-type"]
+ if proc_type != "4,ENCRYPTED":
+ raise SSHException(
+ 'Unknown private key structure "{}"'.format(proc_type)
+ )
+ try:
+ encryption_type, saltstr = headers["dek-info"].split(",")
+ except:
+ raise SSHException("Can't parse DEK-info in private key file")
+ if encryption_type not in self._CIPHER_TABLE:
+ raise SSHException(
+ 'Unknown private key cipher "{}"'.format(encryption_type)
+ )
+ # if no password was passed in,
+ # raise an exception pointing out that we need one
+ if password is None:
+ raise PasswordRequiredException("Private key file is encrypted")
+ cipher = self._CIPHER_TABLE[encryption_type]["cipher"]
+ keysize = self._CIPHER_TABLE[encryption_type]["keysize"]
+ mode = self._CIPHER_TABLE[encryption_type]["mode"]
+ salt = unhexlify(b(saltstr))
+ key = util.generate_key_bytes(md5, salt, password, keysize)
+ decryptor = Cipher(
+ cipher(key), mode(salt), backend=default_backend()
+ ).decryptor()
+ return decryptor.update(data) + decryptor.finalize()
+
+ def _read_private_key_openssh(self, lines, password):
+ """
+ Read the new OpenSSH SSH2 private key format available
+ since OpenSSH version 6.5
+ Reference:
+ https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
+ """
+ try:
+ data = decodebytes(b("".join(lines)))
+ except base64.binascii.Error as e:
+ raise SSHException("base64 decoding error: {}".format(e))
+
+ # read data struct
+ auth_magic = data[:15]
+ if auth_magic != OPENSSH_AUTH_MAGIC:
+ raise SSHException("unexpected OpenSSH key header encountered")
+
+ cstruct = self._uint32_cstruct_unpack(data[15:], "sssur")
+ cipher, kdfname, kdf_options, num_pubkeys, remainder = cstruct
+ # For now, just support 1 key.
+ if num_pubkeys > 1:
+ raise SSHException(
+ "unsupported: private keyfile has multiple keys"
+ )
+ pubkey, privkey_blob = self._uint32_cstruct_unpack(remainder, "ss")
+
+ if kdfname == b("bcrypt"):
+ if cipher == b("aes256-cbc"):
+ mode = modes.CBC
+ elif cipher == b("aes256-ctr"):
+ mode = modes.CTR
+ else:
+ raise SSHException(
+ "unknown cipher `{}` used in private key file".format(
+ cipher.decode("utf-8")
+ )
+ )
+ # Encrypted private key.
+ # If no password was passed in, raise an exception pointing
+ # out that we need one
+ if password is None:
+ raise PasswordRequiredException(
+ "private key file is encrypted"
+ )
+
+ # Unpack salt and rounds from kdfoptions
+ salt, rounds = self._uint32_cstruct_unpack(kdf_options, "su")
+
+ # run bcrypt kdf to derive key and iv/nonce (32 + 16 bytes)
+ key_iv = bcrypt.kdf(
+ b(password),
+ b(salt),
+ 48,
+ rounds,
+ # We can't control how many rounds are on disk, so no sense
+ # warning about it.
+ ignore_few_rounds=True,
+ )
+ key = key_iv[:32]
+ iv = key_iv[32:]
+
+ # decrypt private key blob
+ decryptor = Cipher(
+ algorithms.AES(key), mode(iv), default_backend()
+ ).decryptor()
+ decrypted_privkey = decryptor.update(privkey_blob)
+ decrypted_privkey += decryptor.finalize()
+ elif cipher == b("none") and kdfname == b("none"):
+ # Unencrypted private key
+ decrypted_privkey = privkey_blob
+ else:
+ raise SSHException(
+ "unknown cipher or kdf used in private key file"
+ )
+
+ # Unpack private key and verify checkints
+ cstruct = self._uint32_cstruct_unpack(decrypted_privkey, "uusr")
+ checkint1, checkint2, keytype, keydata = cstruct
+
+ if checkint1 != checkint2:
+ raise SSHException(
+ "OpenSSH private key file checkints do not match"
+ )
+
+ return _unpad_openssh(keydata)
+
+ def _uint32_cstruct_unpack(self, data, strformat):
+ """
+ Used to read new OpenSSH private key format.
+ Unpacks a c data structure containing a mix of 32-bit uints and
+ variable length strings prefixed by 32-bit uint size field,
+ according to the specified format. Returns the unpacked vars
+ in a tuple.
+ Format strings:
+ s - denotes a string
+ i - denotes a long integer, encoded as a byte string
+ u - denotes a 32-bit unsigned integer
+ r - the remainder of the input string, returned as a string
+ """
+ arr = []
+ idx = 0
+ try:
+ for f in strformat:
+ if f == "s":
+ # string
+ s_size = struct.unpack(">L", data[idx : idx + 4])[0]
+ idx += 4
+ s = data[idx : idx + s_size]
+ idx += s_size
+ arr.append(s)
+ if f == "i":
+ # long integer
+ s_size = struct.unpack(">L", data[idx : idx + 4])[0]
+ idx += 4
+ s = data[idx : idx + s_size]
+ idx += s_size
+ i = util.inflate_long(s, True)
+ arr.append(i)
+ elif f == "u":
+ # 32-bit unsigned int
+ u = struct.unpack(">L", data[idx : idx + 4])[0]
+ idx += 4
+ arr.append(u)
+ elif f == "r":
+ # remainder as string
+ s = data[idx:]
+ arr.append(s)
+ break
+ except Exception as e:
+ # PKey-consuming code frequently wants to save-and-skip-over issues
+ # with loading keys, and uses SSHException as the (really friggin
+ # awful) signal for this. So for now...we do this.
+ raise SSHException(str(e))
+ return tuple(arr)
+
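+ # Worked example (illustrative only, hand-built data): the helper above
+ # unpacks a length-prefixed string, a uint32 and the remainder like so:
+ #
+ #   import struct
+ #   data = struct.pack(">I", 3) + b"abc" + struct.pack(">I", 7) + b"rest"
+ #   self._uint32_cstruct_unpack(data, "sur")   # -> (b"abc", 7, b"rest")
+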
+ def _write_private_key_file(self, filename, key, format, password=None):
+ """
+ Write an SSH2-format private key file in a form that can be read by
+ paramiko or openssh. If no password is given, the key is written in
+ a trivially-encoded format (base64) which is completely insecure. If
+ a password is given, the best encryption available to the underlying
+ cryptography library is used.
+
+ :param str filename: name of the file to write.
+ :param key: the underlying ``cryptography`` private key object.
+ :param format: the ``cryptography`` serialization format to use.
+ :param str password: an optional password to use to encrypt the file.
+
+ :raises: ``IOError`` -- if there was an error writing the file.
+ """
+ # Ensure that we create new key files directly with a user-only mode,
+ # instead of opening, writing, then chmodding, which leaves us open to
+ # CVE-2022-24302.
+ with os.fdopen(
+ os.open(
+ filename,
+ # NOTE: O_TRUNC is a noop on new files, and O_CREAT is a noop
+ # on existing files, so using all 3 in both cases is fine.
+ flags=os.O_WRONLY | os.O_TRUNC | os.O_CREAT,
+ # Ditto the use of the 'mode' argument; it should be safe to
+ # give even for existing files (though it will not act like a
+ # chmod in that case).
+ mode=o600,
+ ),
+ # Yea, you still gotta inform the FLO that it is in "write" mode.
+ "w",
+ ) as f:
+ self._write_private_key(f, key, format, password=password)
+
+ def _write_private_key(self, f, key, format, password=None):
+ if password is None:
+ encryption = serialization.NoEncryption()
+ else:
+ encryption = serialization.BestAvailableEncryption(b(password))
+
+ f.write(
+ key.private_bytes(
+ serialization.Encoding.PEM, format, encryption
+ ).decode()
+ )
+
+ def _check_type_and_load_cert(self, msg, key_type, cert_type):
+ """
+ Perform message type-checking & optional certificate loading.
+
+ This includes fast-forwarding cert ``msg`` objects past the nonce, so
+ that the subsequent fields are the key numbers; thus the caller may
+ expect to treat the message as key material afterwards either way.
+
+ The obtained key type is returned for classes which need to know what
+ it was (e.g. ECDSA.)
+ """
+ # Normalization; most classes have a single key type and give a string,
+ # but eg ECDSA is a 1:N mapping.
+ key_types = key_type
+ cert_types = cert_type
+ if isinstance(key_type, str):
+ key_types = [key_types]
+ if isinstance(cert_types, str):
+ cert_types = [cert_types]
+ # Can't do much with no message, that should've been handled elsewhere
+ if msg is None:
+ raise SSHException("Key object may not be empty")
+ # First field is always key type, in either kind of object. (make sure
+ # we rewind before grabbing it - sometimes caller had to do their own
+ # introspection first!)
+ msg.rewind()
+ type_ = msg.get_text()
+ # Regular public key - nothing special to do besides the implicit
+ # type check.
+ if type_ in key_types:
+ pass
+ # OpenSSH-compatible certificate - store full copy as .public_blob
+ # (so signing works correctly) and then fast-forward past the
+ # nonce.
+ elif type_ in cert_types:
+ # This seems the cleanest way to 'clone' an already-being-read
+ # message; they're *IO objects at heart and their .getvalue()
+ # always returns the full value regardless of pointer position.
+ self.load_certificate(Message(msg.asbytes()))
+ # Read out nonce as it comes before the public numbers - our caller
+ # is likely going to use the (only borrowed by us, not owned)
+ # 'msg' object for loading those numbers right after this.
+ # TODO: usefully interpret it & other non-public-number fields
+ # (requires going back into per-type subclasses.)
+ msg.get_string()
+ else:
+ err = "Invalid key (class: {}, data type: {}"
+ raise SSHException(err.format(self.__class__.__name__, type_))
+
+ def load_certificate(self, value):
+ """
+ Supplement the private key contents with data loaded from an OpenSSH
+ public key (``.pub``) or certificate (``-cert.pub``) file, a string
+ containing such a file, or a `.Message` object.
+
+ The .pub contents add no real value, since the private key
+ file includes sufficient information to derive the public
+ key info. For certificates, however, this can be used on
+ the client side to offer authentication requests to the server
+ based on certificate instead of raw public key.
+
+ See:
+ https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.certkeys
+
+ Note: very little effort is made to validate the certificate contents,
+ that is for the server to decide if it is good enough to authenticate
+ successfully.
+ """
+ if isinstance(value, Message):
+ constructor = "from_message"
+ elif os.path.isfile(value):
+ constructor = "from_file"
+ else:
+ constructor = "from_string"
+ blob = getattr(PublicBlob, constructor)(value)
+ if not blob.key_type.startswith(self.get_name()):
+ err = "PublicBlob type {} incompatible with key type {}"
+ raise ValueError(err.format(blob.key_type, self.get_name()))
+ self.public_blob = blob
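+
+ # Usage sketch (illustrative only, not part of upstream paramiko); the
+ # paths are hypothetical:
+ #
+ #   from paramiko import RSAKey
+ #   key = RSAKey.from_private_key_file("/home/user/.ssh/id_rsa")
+ #   key.load_certificate("/home/user/.ssh/id_rsa-cert.pub")
+ #   # `key` now carries .public_blob and, when passed as ``pkey=`` to
+ #   # SSHClient.connect(), offers certificate-based authentication.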
+
+
+# General construct for an OpenSSH style Public Key blob
+# readable from a one-line file of the format:
+# <key-name> <base64-blob> [<comment>]
+# Of little value in the case of standard public keys
+# {ssh-rsa, ssh-dss, ssh-ecdsa, ssh-ed25519}, but should
+# provide rudimentary support for {*-cert.v01}
+class PublicBlob:
+ """
+ OpenSSH plain public key or OpenSSH signed public key (certificate).
+
+ Tries to be as dumb as possible and barely cares about specific
+ per-key-type data.
+
+ .. note::
+
+ Most of the time you'll want to call `from_file`, `from_string` or
+ `from_message` for useful instantiation, the main constructor is
+ basically "I should be using ``attrs`` for this."
+ """
+
+ def __init__(self, type_, blob, comment=None):
+ """
+ Create a new public blob of given type and contents.
+
+ :param str type_: Type indicator, eg ``ssh-rsa``.
+ :param bytes blob: The blob bytes themselves.
+ :param str comment: A comment, if one was given (e.g. file-based.)
+ """
+ self.key_type = type_
+ self.key_blob = blob
+ self.comment = comment
+
+ @classmethod
+ def from_file(cls, filename):
+ """
+ Create a public blob from a ``-cert.pub``-style file on disk.
+ """
+ with open(filename) as f:
+ string = f.read()
+ return cls.from_string(string)
+
+ @classmethod
+ def from_string(cls, string):
+ """
+ Create a public blob from a ``-cert.pub``-style string.
+ """
+ fields = string.split(None, 2)
+ if len(fields) < 2:
+ msg = "Not enough fields for public blob: {}"
+ raise ValueError(msg.format(fields))
+ key_type = fields[0]
+ key_blob = decodebytes(b(fields[1]))
+ try:
+ comment = fields[2].strip()
+ except IndexError:
+ comment = None
+ # Verify that the first (string) field of the blob message matches
+ # the key_type
+ m = Message(key_blob)
+ blob_type = m.get_text()
+ if blob_type != key_type:
+ deets = "key type={!r}, but blob type={!r}".format(
+ key_type, blob_type
+ )
+ raise ValueError("Invalid PublicBlob contents: {}".format(deets))
+ # All good? All good.
+ return cls(type_=key_type, blob=key_blob, comment=comment)
+
+ @classmethod
+ def from_message(cls, message):
+ """
+ Create a public blob from a network `.Message`.
+
+ Specifically, a cert-bearing pubkey auth packet, because by definition
+ OpenSSH-style certificates 'are' their own network representation.
+ """
+ type_ = message.get_text()
+ return cls(type_=type_, blob=message.asbytes())
+
+ def __str__(self):
+ ret = "{} public key/certificate".format(self.key_type)
+ if self.comment:
+ ret += "- {}".format(self.comment)
+ return ret
+
+ def __eq__(self, other):
+ # Just piggyback on Message/BytesIO, since both of these should be one.
+ return self and other and self.key_blob == other.key_blob
+
+ def __ne__(self, other):
+ return not self == other
diff --git a/paramiko/primes.py b/paramiko/primes.py
new file mode 100644
index 0000000..663c58e
--- /dev/null
+++ b/paramiko/primes.py
@@ -0,0 +1,148 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Utility functions for dealing with primes.
+"""
+
+import os
+
+from paramiko import util
+from paramiko.common import byte_mask
+from paramiko.ssh_exception import SSHException
+
+
+def _roll_random(n):
+ """returns a random # from 0 to N-1"""
+ bits = util.bit_length(n - 1)
+ byte_count = (bits + 7) // 8
+ hbyte_mask = pow(2, bits % 8) - 1
+
+ # so here's the plan:
+ # we fetch as many random bits as we'd need to fit N-1, and if the
+ # generated number is >= N, we try again. in the worst case (N-1 is a
+ # power of 2), we have slightly better than 50% odds of getting one that
+ # fits, so i can't guarantee that this loop will ever finish, but the odds
+ # of it looping forever should be infinitesimal.
+ while True:
+ x = os.urandom(byte_count)
+ if hbyte_mask > 0:
+ x = byte_mask(x[0], hbyte_mask) + x[1:]
+ num = util.inflate_long(x, 1)
+ if num < n:
+ break
+ return num
+
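+# Worked example (illustrative only): for _roll_random(6), bits =
+# bit_length(5) = 3, byte_count = 1 and hbyte_mask = 0b111, so each attempt
+# draws a value in 0..7 and is kept only if it is < 6; on average fewer
+# than two draws are needed.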
+
+class ModulusPack:
+ """
+ convenience object for holding the contents of the /etc/ssh/moduli file,
+ on systems that have such a file.
+ """
+
+ def __init__(self):
+ # pack is a hash of: bits -> [ (generator, modulus) ... ]
+ self.pack = {}
+ self.discarded = []
+
+ def _parse_modulus(self, line):
+ (
+ timestamp,
+ mod_type,
+ tests,
+ tries,
+ size,
+ generator,
+ modulus,
+ ) = line.split()
+ mod_type = int(mod_type)
+ tests = int(tests)
+ tries = int(tries)
+ size = int(size)
+ generator = int(generator)
+ modulus = int(modulus, 16)
+
+ # weed out primes that aren't at least:
+ # type 2 (meets basic structural requirements)
+ # test 4 (more than just a small-prime sieve)
+ # tries >= 100 if tests & 4 (at least 100 tries of miller-rabin)
+ if (
+ mod_type < 2
+ or tests < 4
+ or (tests & 4 and tests < 8 and tries < 100)
+ ):
+ self.discarded.append(
+ (modulus, "does not meet basic requirements")
+ )
+ return
+ if generator == 0:
+ generator = 2
+
+ # there's a bug in the ssh "moduli" file (yeah, i know: shock! dismay!
+ # call cnn!) where it understates the bit lengths of these primes by 1.
+ # this is okay.
+ bl = util.bit_length(modulus)
+ if (bl != size) and (bl != size + 1):
+ self.discarded.append(
+ (modulus, "incorrectly reported bit length {}".format(size))
+ )
+ return
+ if bl not in self.pack:
+ self.pack[bl] = []
+ self.pack[bl].append((generator, modulus))
+
+ def read_file(self, filename):
+ """
+ :raises IOError: passed from any file operations that fail.
+ """
+ self.pack = {}
+ with open(filename, "r") as f:
+ for line in f:
+ line = line.strip()
+ if (len(line) == 0) or (line[0] == "#"):
+ continue
+ try:
+ self._parse_modulus(line)
+ except:
+ continue
+
+ def get_modulus(self, min, prefer, max):
+ bitsizes = sorted(self.pack.keys())
+ if len(bitsizes) == 0:
+ raise SSHException("no moduli available")
+ good = -1
+ # find nearest bitsize >= preferred
+ for b in bitsizes:
+ if (b >= prefer) and (b <= max) and (b < good or good == -1):
+ good = b
+ # if that failed, find greatest bitsize >= min
+ if good == -1:
+ for b in bitsizes:
+ if (b >= min) and (b <= max) and (b > good):
+ good = b
+ if good == -1:
+ # their entire (min, max) range has no intersection with our range.
+ # if their range is below ours, pick the smallest. otherwise pick
+ # the largest. it'll be out of their range requirement either way,
+ # but we'll be sending them the closest one we have.
+ good = bitsizes[0]
+ if min > good:
+ good = bitsizes[-1]
+ # now pick a random modulus of this bitsize
+ n = _roll_random(len(self.pack[good]))
+ return self.pack[good][n]
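+
+
+# Usage sketch (illustrative only, not part of upstream paramiko): this is
+# roughly how group-exchange kex code consumes the pack. The moduli path
+# is the usual OpenSSH location and may differ per system.
+#
+#   from paramiko.primes import ModulusPack
+#
+#   pack = ModulusPack()
+#   pack.read_file("/etc/ssh/moduli")
+#   g, p = pack.get_modulus(1024, 2048, 8192)   # min, preferred, max bits
+#   print(g, p.bit_length())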
diff --git a/paramiko/proxy.py b/paramiko/proxy.py
new file mode 100644
index 0000000..f7609c9
--- /dev/null
+++ b/paramiko/proxy.py
@@ -0,0 +1,134 @@
+# Copyright (C) 2012 Yipit, Inc <coders@yipit.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+import os
+import shlex
+import signal
+from select import select
+import socket
+import time
+
+# Try-and-ignore import so platforms w/o subprocess (eg Google App Engine) can
+# still import paramiko.
+subprocess, subprocess_import_error = None, None
+try:
+ import subprocess
+except ImportError as e:
+ subprocess_import_error = e
+
+from paramiko.ssh_exception import ProxyCommandFailure
+from paramiko.util import ClosingContextManager
+
+
+class ProxyCommand(ClosingContextManager):
+ """
+ Wraps a subprocess running ProxyCommand-driven programs.
+
+ This class implements the socket-like interface needed by the
+ `.Transport` and `.Packetizer` classes. Using this class instead of a
+ regular socket makes it possible to talk with a Popen'd command that will
+ proxy traffic between the client and a server hosted on another machine.
+
+ Instances of this class may be used as context managers.
+ """
+
+ def __init__(self, command_line):
+ """
+ Create a new ProxyCommand instance. The instance created by this
+ class can be passed as an argument to the `.Transport` class.
+
+ :param str command_line:
+ the command that should be executed and used as the proxy.
+ """
+ if subprocess is None:
+ raise subprocess_import_error
+ self.cmd = shlex.split(command_line)
+ self.process = subprocess.Popen(
+ self.cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ bufsize=0,
+ )
+ self.timeout = None
+
+ def send(self, content):
+ """
+ Write the content received from the SSH client to the standard
+ input of the forked command.
+
+ :param bytes content: the bytes to be sent to the forked command
+ """
+ try:
+ self.process.stdin.write(content)
+ except IOError as e:
+ # There was a problem with the child process. It probably
+ # died and we can't proceed. The best option here is to
+ # raise an exception informing the user that the configured
+ # ProxyCommand is not working.
+ raise ProxyCommandFailure(" ".join(self.cmd), e.strerror)
+ return len(content)
+
+ def recv(self, size):
+ """
+ Read from the standard output of the forked program.
+
+ :param int size: how many bytes should be read
+
+ :return: the string of bytes read, which may be shorter than requested
+ """
+ try:
+ buffer = b""
+ start = time.time()
+ while len(buffer) < size:
+ select_timeout = None
+ if self.timeout is not None:
+ elapsed = time.time() - start
+ if elapsed >= self.timeout:
+ raise socket.timeout()
+ select_timeout = self.timeout - elapsed
+
+ r, w, x = select([self.process.stdout], [], [], select_timeout)
+ if r and r[0] == self.process.stdout:
+ buffer += os.read(
+ self.process.stdout.fileno(), size - len(buffer)
+ )
+ return buffer
+ except socket.timeout:
+ if buffer:
+ # Don't raise socket.timeout, return partial result instead
+ return buffer
+ raise # socket.timeout is a subclass of IOError
+ except IOError as e:
+ raise ProxyCommandFailure(" ".join(self.cmd), e.strerror)
+
+ def close(self):
+ os.kill(self.process.pid, signal.SIGTERM)
+
+ @property
+ def closed(self):
+ return self.process.returncode is not None
+
+ @property
+ def _closed(self):
+ # Concession to Python 3 socket-like API
+ return self.closed
+
+ def settimeout(self, timeout):
+ self.timeout = timeout
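+
+
+# Usage sketch (illustrative only, not part of upstream paramiko): a
+# ProxyCommand instance can stand in for the socket handed to Transport or
+# SSHClient.connect(). Host names below are hypothetical.
+#
+#   import paramiko
+#
+#   proxy = paramiko.ProxyCommand(
+#       "ssh -q -W target.example.com:22 jump.example.com"
+#   )
+#   client = paramiko.SSHClient()
+#   client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+#   client.connect("target.example.com", username="user", sock=proxy)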
diff --git a/paramiko/rsakey.py b/paramiko/rsakey.py
new file mode 100644
index 0000000..b7ad3ce
--- /dev/null
+++ b/paramiko/rsakey.py
@@ -0,0 +1,227 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+RSA keys.
+"""
+
+from cryptography.exceptions import InvalidSignature, UnsupportedAlgorithm
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes, serialization
+from cryptography.hazmat.primitives.asymmetric import rsa, padding
+
+from paramiko.message import Message
+from paramiko.pkey import PKey
+from paramiko.ssh_exception import SSHException
+
+
+class RSAKey(PKey):
+ """
+ Representation of an RSA key which can be used to sign and verify SSH2
+ data.
+ """
+
+ name = "ssh-rsa"
+ HASHES = {
+ "ssh-rsa": hashes.SHA1,
+ "ssh-rsa-cert-v01@openssh.com": hashes.SHA1,
+ "rsa-sha2-256": hashes.SHA256,
+ "rsa-sha2-256-cert-v01@openssh.com": hashes.SHA256,
+ "rsa-sha2-512": hashes.SHA512,
+ "rsa-sha2-512-cert-v01@openssh.com": hashes.SHA512,
+ }
+
+ def __init__(
+ self,
+ msg=None,
+ data=None,
+ filename=None,
+ password=None,
+ key=None,
+ file_obj=None,
+ ):
+ self.key = None
+ self.public_blob = None
+ if file_obj is not None:
+ self._from_private_key(file_obj, password)
+ return
+ if filename is not None:
+ self._from_private_key_file(filename, password)
+ return
+ if (msg is None) and (data is not None):
+ msg = Message(data)
+ if key is not None:
+ self.key = key
+ else:
+ self._check_type_and_load_cert(
+ msg=msg,
+ # NOTE: this does NOT change when using rsa2 signatures; it's
+ # purely about key loading, not exchange or verification
+ key_type=self.name,
+ cert_type="ssh-rsa-cert-v01@openssh.com",
+ )
+ self.key = rsa.RSAPublicNumbers(
+ e=msg.get_mpint(), n=msg.get_mpint()
+ ).public_key(default_backend())
+
+ @classmethod
+ def identifiers(cls):
+ return list(cls.HASHES.keys())
+
+ @property
+ def size(self):
+ return self.key.key_size
+
+ @property
+ def public_numbers(self):
+ if isinstance(self.key, rsa.RSAPrivateKey):
+ return self.key.private_numbers().public_numbers
+ else:
+ return self.key.public_numbers()
+
+ def asbytes(self):
+ m = Message()
+ m.add_string(self.name)
+ m.add_mpint(self.public_numbers.e)
+ m.add_mpint(self.public_numbers.n)
+ return m.asbytes()
+
+ def __str__(self):
+ # NOTE: see #853 to explain some legacy behavior.
+ # TODO 4.0: replace with a nice clean fingerprint display or something
+ return self.asbytes().decode("utf8", errors="ignore")
+
+ @property
+ def _fields(self):
+ return (self.get_name(), self.public_numbers.e, self.public_numbers.n)
+
+ def get_name(self):
+ return self.name
+
+ def get_bits(self):
+ return self.size
+
+ def can_sign(self):
+ return isinstance(self.key, rsa.RSAPrivateKey)
+
+ def sign_ssh_data(self, data, algorithm=None):
+ if algorithm is None:
+ algorithm = self.name
+ sig = self.key.sign(
+ data,
+ padding=padding.PKCS1v15(),
+ # HASHES being just a map from long identifier to a hash class
+ # (SHA-1/256/512) - cert'ness is not truly relevant.
+ algorithm=self.HASHES[algorithm](),
+ )
+ m = Message()
+ # And here again, cert'ness is irrelevant, so it is stripped out.
+ m.add_string(algorithm.replace("-cert-v01@openssh.com", ""))
+ m.add_string(sig)
+ return m
+
+ def verify_ssh_sig(self, data, msg):
+ sig_algorithm = msg.get_text()
+ if sig_algorithm not in self.HASHES:
+ return False
+ key = self.key
+ if isinstance(key, rsa.RSAPrivateKey):
+ key = key.public_key()
+
+ # NOTE: pad received signature with leading zeros, key.verify()
+ # expects a signature of key size (e.g. PuTTY doesn't pad)
+ sign = msg.get_binary()
+ diff = key.key_size - len(sign) * 8
+ if diff > 0:
+ sign = b"\x00" * ((diff + 7) // 8) + sign
+
+ try:
+ key.verify(
+ sign, data, padding.PKCS1v15(), self.HASHES[sig_algorithm]()
+ )
+ except InvalidSignature:
+ return False
+ else:
+ return True
+
+ def write_private_key_file(self, filename, password=None):
+ self._write_private_key_file(
+ filename,
+ self.key,
+ serialization.PrivateFormat.TraditionalOpenSSL,
+ password=password,
+ )
+
+ def write_private_key(self, file_obj, password=None):
+ self._write_private_key(
+ file_obj,
+ self.key,
+ serialization.PrivateFormat.TraditionalOpenSSL,
+ password=password,
+ )
+
+ @staticmethod
+ def generate(bits, progress_func=None):
+ """
+ Generate a new private RSA key. This factory function can be used to
+ generate a new host key or authentication key.
+
+ :param int bits: number of bits the generated key should be.
+ :param progress_func: Unused
+ :return: new `.RSAKey` private key
+ """
+ key = rsa.generate_private_key(
+ public_exponent=65537, key_size=bits, backend=default_backend()
+ )
+ return RSAKey(key=key)
+
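+ # Usage sketch (illustrative only, not part of upstream paramiko); the
+ # output path and passphrase are made up:
+ #
+ #   from paramiko import RSAKey
+ #   key = RSAKey.generate(2048)
+ #   key.write_private_key_file("/tmp/demo_rsa.key", password="sekrit")
+ #   print(key.get_name(), key.get_bits(), key.fingerprint)
+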
+ # ...internals...
+
+ def _from_private_key_file(self, filename, password):
+ data = self._read_private_key_file("RSA", filename, password)
+ self._decode_key(data)
+
+ def _from_private_key(self, file_obj, password):
+ data = self._read_private_key("RSA", file_obj, password)
+ self._decode_key(data)
+
+ def _decode_key(self, data):
+ pkformat, data = data
+ if pkformat == self._PRIVATE_KEY_FORMAT_ORIGINAL:
+ try:
+ key = serialization.load_der_private_key(
+ data, password=None, backend=default_backend()
+ )
+ except (ValueError, TypeError, UnsupportedAlgorithm) as e:
+ raise SSHException(str(e))
+ elif pkformat == self._PRIVATE_KEY_FORMAT_OPENSSH:
+ n, e, d, iqmp, p, q = self._uint32_cstruct_unpack(data, "iiiiii")
+ public_numbers = rsa.RSAPublicNumbers(e=e, n=n)
+ key = rsa.RSAPrivateNumbers(
+ p=p,
+ q=q,
+ d=d,
+ dmp1=d % (p - 1),
+ dmq1=d % (q - 1),
+ iqmp=iqmp,
+ public_numbers=public_numbers,
+ ).private_key(default_backend())
+ else:
+ self._got_bad_key_format_id(pkformat)
+ assert isinstance(key, rsa.RSAPrivateKey)
+ self.key = key
diff --git a/paramiko/server.py b/paramiko/server.py
new file mode 100644
index 0000000..6923bdf
--- /dev/null
+++ b/paramiko/server.py
@@ -0,0 +1,732 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+`.ServerInterface` is an interface to override for server support.
+"""
+
+import threading
+from paramiko import util
+from paramiko.common import (
+ DEBUG,
+ ERROR,
+ OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
+ AUTH_FAILED,
+ AUTH_SUCCESSFUL,
+)
+
+
+class ServerInterface:
+ """
+ This class defines an interface for controlling the behavior of Paramiko
+ in server mode.
+
+ Methods on this class are called from Paramiko's primary thread, so you
+ shouldn't do too much work in them. (Certainly nothing that blocks or
+ sleeps.)
+ """
+
+ def check_channel_request(self, kind, chanid):
+ """
+ Determine if a channel request of a given type will be granted, and
+ return ``OPEN_SUCCEEDED`` or an error code. This method is
+ called in server mode when the client requests a channel, after
+ authentication is complete.
+
+ If you allow channel requests (and an ssh server that didn't would be
+ useless), you should also override some of the channel request methods
+ below, which are used to determine which services will be allowed on
+ a given channel:
+
+ - `check_channel_pty_request`
+ - `check_channel_shell_request`
+ - `check_channel_subsystem_request`
+ - `check_channel_window_change_request`
+ - `check_channel_x11_request`
+ - `check_channel_forward_agent_request`
+
+ The ``chanid`` parameter is a small number that uniquely identifies the
+ channel within a `.Transport`. A `.Channel` object is not created
+ unless this method returns ``OPEN_SUCCEEDED`` -- once a
+ `.Channel` object is created, you can call `.Channel.get_id` to
+ retrieve the channel ID.
+
+ The return value should either be ``OPEN_SUCCEEDED`` (or
+ ``0``) to allow the channel request, or one of the following error
+ codes to reject it:
+
+ - ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``
+ - ``OPEN_FAILED_CONNECT_FAILED``
+ - ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE``
+ - ``OPEN_FAILED_RESOURCE_SHORTAGE``
+
+ The default implementation always returns
+ ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``.
+
+ :param str kind:
+ the kind of channel the client would like to open (usually
+ ``"session"``).
+ :param int chanid: ID of the channel
+ :return: an `int` success or failure code (listed above)
+ """
+ return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
+
+ def get_allowed_auths(self, username):
+ """
+ Return a list of authentication methods supported by the server.
+ This list is sent to clients attempting to authenticate, to inform them
+ of authentication methods that might be successful.
+
+ The "list" is actually a string of comma-separated names of types of
+ authentication. Possible values are ``"password"``, ``"publickey"``,
+ and ``"none"``.
+
+ The default implementation always returns ``"password"``.
+
+ :param str username: the username requesting authentication.
+ :return: a comma-separated `str` of authentication types
+ """
+ return "password"
+
+ def check_auth_none(self, username):
+ """
+ Determine if a client may open channels with no (further)
+ authentication.
+
+ Return ``AUTH_FAILED`` if the client must authenticate, or
+ ``AUTH_SUCCESSFUL`` if it's okay for the client to not
+ authenticate.
+
+ The default implementation always returns ``AUTH_FAILED``.
+
+ :param str username: the username of the client.
+ :return:
+ ``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
+ it succeeds.
+ :rtype: int
+ """
+ return AUTH_FAILED
+
+ def check_auth_password(self, username, password):
+ """
+ Determine if a given username and password supplied by the client are
+ acceptable for use in authentication.
+
+ Return ``AUTH_FAILED`` if the password is not accepted,
+ ``AUTH_SUCCESSFUL`` if the password is accepted and completes
+ the authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
+ authentication is stateful, and this password is accepted for
+ authentication, but more authentication is required. (In this latter
+ case, `get_allowed_auths` will be called to report to the client what
+ options it has for continuing the authentication.)
+
+ The default implementation always returns ``AUTH_FAILED``.
+
+ :param str username: the username of the authenticating client.
+ :param str password: the password given by the client.
+ :return:
+ ``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
+ it succeeds; ``AUTH_PARTIALLY_SUCCESSFUL`` if the password auth is
+ successful, but authentication must continue.
+ :rtype: int
+ """
+ return AUTH_FAILED
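A minimal sketch of a `ServerInterface` subclass tying the methods above together, accepting only ``"session"`` channels and one hard-coded credential pair; the class name and credentials are placeholders.

    import paramiko

    class StubServer(paramiko.ServerInterface):
        def check_channel_request(self, kind, chanid):
            # Accept interactive "session" channels only.
            if kind == "session":
                return paramiko.OPEN_SUCCEEDED
            return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED

        def get_allowed_auths(self, username):
            return "password"

        def check_auth_password(self, username, password):
            # Hypothetical credentials; a real server would consult PAM,
            # a database, etc.
            if (username, password) == ("demo", "demo-password"):
                return paramiko.AUTH_SUCCESSFUL
            return paramiko.AUTH_FAILED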
+
+ def check_auth_publickey(self, username, key):
+ """
+ Determine if a given key supplied by the client is acceptable for use
+ in authentication. You should override this method in server mode to
+ check the username and key and decide if you would accept a signature
+ made using this key.
+
+ Return ``AUTH_FAILED`` if the key is not accepted,
+ ``AUTH_SUCCESSFUL`` if the key is accepted and completes the
+ authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
+ authentication is stateful, and this key is accepted for
+ authentication, but more authentication is required. (In this latter
+ case, `get_allowed_auths` will be called to report to the client what
+ options it has for continuing the authentication.)
+
+ Note that you don't have to actually verify any key signature here.
+ If you're willing to accept the key, Paramiko will do the work of
+ verifying the client's signature.
+
+ The default implementation always returns ``AUTH_FAILED``.
+
+ :param str username: the username of the authenticating client
+ :param .PKey key: the key object provided by the client
+ :return:
+ ``AUTH_FAILED`` if the client can't authenticate with this key;
+ ``AUTH_SUCCESSFUL`` if it can; ``AUTH_PARTIALLY_SUCCESSFUL`` if it
+ can authenticate with this key but must continue with
+ authentication
+ :rtype: int
+ """
+ return AUTH_FAILED
+
+ def check_auth_interactive(self, username, submethods):
+ """
+ Begin an interactive authentication challenge, if supported. You
+ should override this method in server mode if you want to support the
+ ``"keyboard-interactive"`` auth type, which requires you to send a
+ series of questions for the client to answer.
+
+ Return ``AUTH_FAILED`` if this auth method isn't supported. Otherwise,
+ you should return an `.InteractiveQuery` object containing the prompts
+ and instructions for the user. The response will be sent via a call
+ to `check_auth_interactive_response`.
+
+ The default implementation always returns ``AUTH_FAILED``.
+
+ :param str username: the username of the authenticating client
+ :param str submethods:
+ a comma-separated list of methods preferred by the client (usually
+ empty)
+ :return:
+ ``AUTH_FAILED`` if this auth method isn't supported; otherwise an
+ object containing queries for the user
+ :rtype: int or `.InteractiveQuery`
+ """
+ return AUTH_FAILED
+
+ def check_auth_interactive_response(self, responses):
+ """
+ Continue or finish an interactive authentication challenge, if
+ supported. You should override this method in server mode if you want
+ to support the ``"keyboard-interactive"`` auth type.
+
+ Return ``AUTH_FAILED`` if the responses are not accepted,
+ ``AUTH_SUCCESSFUL`` if the responses are accepted and complete
+ the authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
+ authentication is stateful, and this set of responses is accepted for
+ authentication, but more authentication is required. (In this latter
+ case, `get_allowed_auths` will be called to report to the client what
+ options it has for continuing the authentication.)
+
+ If you wish to continue interactive authentication with more questions,
+ you may return an `.InteractiveQuery` object, which should cause the
+ client to respond with more answers, calling this method again. This
+ cycle can continue indefinitely.
+
+ The default implementation always returns ``AUTH_FAILED``.
+
+ :param responses: list of `str` responses from the client
+ :return:
+ ``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
+ it succeeds; ``AUTH_PARTIALLY_SUCCESSFUL`` if the interactive auth
+ is successful, but authentication must continue; otherwise an
+ object containing queries for the user
+ :rtype: int or `.InteractiveQuery`
+ """
+ return AUTH_FAILED
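A sketch of keyboard-interactive support built on the two methods above and `InteractiveQuery`; the prompt text and expected answer are placeholders.

    import paramiko

    class KIServer(paramiko.ServerInterface):
        def get_allowed_auths(self, username):
            return "keyboard-interactive"

        def check_auth_interactive(self, username, submethods):
            query = paramiko.InteractiveQuery("token-auth", "Enter your token")
            query.add_prompt("Token: ", echo=False)
            return query

        def check_auth_interactive_response(self, responses):
            # One prompt was sent, so one response is expected back.
            if responses == ["letmein"]:
                return paramiko.AUTH_SUCCESSFUL
            return paramiko.AUTH_FAILED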
+
+ def check_auth_gssapi_with_mic(
+ self, username, gss_authenticated=AUTH_FAILED, cc_file=None
+ ):
+ """
+ Authenticate the given user to the server if they are a valid krb5
+ principal.
+
+ :param str username: The username of the authenticating client
+ :param int gss_authenticated: The result of the krb5 authentication
+ :param str cc_file: The krb5 client credentials cache filename
+ :return: ``AUTH_FAILED`` if the user is not authenticated otherwise
+ ``AUTH_SUCCESSFUL``
+ :rtype: int
+ :note: Kerberos credential delegation is not supported.
+ :see: `.ssh_gss`
+ :note: We only verify in `.AuthHandler` that the given user is a valid
+ krb5 principal! We do not check whether the krb5 principal is
+ allowed to log in on the server, because there is no portable way
+ to do that in Python. So if you develop your own SSH server with
+ Paramiko for a certain platform like Linux, you should call
+ ``krb5_kuserok()`` in your local Kerberos library to make sure that
+ the krb5 principal has an account on the server and is allowed to
+ log in as a user.
+ :see: http://www.unix.com/man-page/all/3/krb5_kuserok/
+ """
+ if gss_authenticated == AUTH_SUCCESSFUL:
+ return AUTH_SUCCESSFUL
+ return AUTH_FAILED
+
+ def check_auth_gssapi_keyex(
+ self, username, gss_authenticated=AUTH_FAILED, cc_file=None
+ ):
+ """
+ Authenticate the given user to the server if they are a valid krb5
+ principal and GSS-API Key Exchange was performed.
+ If GSS-API Key Exchange was not performed, this authentication method
+ won't be available.
+
+ :param str username: The username of the authenticating client
+ :param int gss_authenticated: The result of the krb5 authentication
+ :param str cc_file: The krb5 client credentials cache filename
+ :return: ``AUTH_FAILED`` if the user is not authenticated otherwise
+ ``AUTH_SUCCESSFUL``
+ :rtype: int
+ :note: Kerberos credential delegation is not supported.
+ :see: `.ssh_gss` `.kex_gss`
+ :note: We only verify in `.AuthHandler` that the given user is a valid
+ krb5 principal! We do not check whether the krb5 principal is
+ allowed to log in on the server, because there is no portable way
+ to do that in Python. So if you develop your own SSH server with
+ Paramiko for a certain platform like Linux, you should call
+ ``krb5_kuserok()`` in your local Kerberos library to make sure that
+ the krb5 principal has an account on the server and is allowed to
+ log in as a user.
+ :see: http://www.unix.com/man-page/all/3/krb5_kuserok/
+ """
+ if gss_authenticated == AUTH_SUCCESSFUL:
+ return AUTH_SUCCESSFUL
+ return AUTH_FAILED
+
+ def enable_auth_gssapi(self):
+ """
+ Override this method in your SSH server to enable GSSAPI
+ authentication.
+ The default implementation always returns ``False``.
+
+ :returns: ``True`` if GSSAPI authentication is enabled, else ``False``.
+ :see: `.ssh_gss`
+ """
+ UseGSSAPI = False
+ return UseGSSAPI
+
+ def check_port_forward_request(self, address, port):
+ """
+ Handle a request for port forwarding. The client is asking that
+ connections to the given address and port be forwarded back across
+ this ssh connection. An address of ``"0.0.0.0"`` indicates a global
+ address (any address associated with this server) and a port of ``0``
+ indicates that no specific port is requested (usually the OS will pick
+ a port).
+
+ The default implementation always returns ``False``, rejecting the
+ port forwarding request. If the request is accepted, you should return
+ the port opened for listening.
+
+ :param str address: the requested address
+ :param int port: the requested port
+ :return:
+ the port number (`int`) that was opened for listening, or ``False``
+ to reject
+ """
+ return False
+
+ def cancel_port_forward_request(self, address, port):
+ """
+ The client would like to cancel a previous port-forwarding request.
+ If the given address and port is being forwarded across this ssh
+ connection, the port should be closed.
+
+ :param str address: the forwarded address
+ :param int port: the forwarded port
+ """
+ pass
+
+ def check_global_request(self, kind, msg):
+ """
+ Handle a global request of the given ``kind``. This method is called
+ in server mode and client mode, whenever the remote host makes a global
+ request. If there are any arguments to the request, they will be in
+ ``msg``.
+
+ There aren't any useful global requests defined, aside from port
+ forwarding, so usually this type of request is an extension to the
+ protocol.
+
+ If the request was successful and you would like to return contextual
+ data to the remote host, return a tuple. Items in the tuple will be
+ sent back with the successful result. (Note that the items in the
+ tuple can only be strings, ints, or bools.)
+
+ The default implementation always returns ``False``, indicating that it
+ does not support any global requests.
+
+ .. note:: Port forwarding requests are handled separately, in
+ `check_port_forward_request`.
+
+ :param str kind: the kind of global request being made.
+ :param .Message msg: any extra arguments to the request.
+ :return:
+ ``True`` or a `tuple` of data if the request was granted; ``False``
+ otherwise.
+ """
+ return False
+
+ # ...Channel requests...
+
+ def check_channel_pty_request(
+ self, channel, term, width, height, pixelwidth, pixelheight, modes
+ ):
+ """
+ Determine if a pseudo-terminal of the given dimensions (usually
+ requested for shell access) can be provided on the given channel.
+
+ The default implementation always returns ``False``.
+
+ :param .Channel channel: the `.Channel` the pty request arrived on.
+ :param str term: type of terminal requested (for example, ``"vt100"``).
+ :param int width: width of screen in characters.
+ :param int height: height of screen in characters.
+ :param int pixelwidth:
+ width of screen in pixels, if known (may be ``0`` if unknown).
+ :param int pixelheight:
+ height of screen in pixels, if known (may be ``0`` if unknown).
+ :return:
+ ``True`` if the pseudo-terminal has been allocated; ``False``
+ otherwise.
+ """
+ return False
+
+ def check_channel_shell_request(self, channel):
+ """
+ Determine if a shell will be provided to the client on the given
+ channel. If this method returns ``True``, the channel should be
+ connected to the stdin/stdout of a shell (or something that acts like
+ a shell).
+
+ The default implementation always returns ``False``.
+
+ :param .Channel channel: the `.Channel` the request arrived on.
+ :return:
+ ``True`` if this channel is now hooked up to a shell; ``False`` if
+ a shell can't or won't be provided.
+ """
+ return False
+
+ def check_channel_exec_request(self, channel, command):
+ """
+ Determine if a shell command will be executed for the client. If this
+ method returns ``True``, the channel should be connected to the stdin,
+ stdout, and stderr of the shell command.
+
+ The default implementation always returns ``False``.
+
+ :param .Channel channel: the `.Channel` the request arrived on.
+ :param str command: the command to execute.
+ :return:
+ ``True`` if this channel is now hooked up to the stdin, stdout, and
+ stderr of the executing command; ``False`` if the command will not
+ be executed.
+
+ .. versionadded:: 1.1
+ """
+ return False
+
+ def check_channel_subsystem_request(self, channel, name):
+ """
+ Determine if a requested subsystem will be provided to the client on
+ the given channel. If this method returns ``True``, all future I/O
+ through this channel will be assumed to be connected to the requested
+ subsystem. An example of a subsystem is ``sftp``.
+
+ The default implementation checks for a subsystem handler assigned via
+ `.Transport.set_subsystem_handler`.
+ If one has been set, the handler is invoked and this method returns
+ ``True``. Otherwise it returns ``False``.
+
+ .. note:: Because the default implementation uses the `.Transport` to
+ identify valid subsystems, you probably won't need to override this
+ method.
+
+ :param .Channel channel: the `.Channel` the pty request arrived on.
+ :param str name: name of the requested subsystem.
+ :return:
+ ``True`` if this channel is now hooked up to the requested
+ subsystem; ``False`` if that subsystem can't or won't be provided.
+ """
+ transport = channel.get_transport()
+ handler_class, args, kwargs = transport._get_subsystem_handler(name)
+ if handler_class is None:
+ return False
+ handler = handler_class(channel, name, self, *args, **kwargs)
+ handler.start()
+ return True
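The default implementation above dispatches to handlers registered on the `Transport`; a sketch of such a registration, assuming an already-negotiated server-mode `Transport`.

    import paramiko

    def enable_sftp(transport: paramiko.Transport) -> None:
        # Every "sftp" subsystem request on this transport now spawns a
        # paramiko.SFTPServer thread; a real server would subclass
        # SFTPServerInterface instead of using the do-nothing base class.
        transport.set_subsystem_handler(
            "sftp", paramiko.SFTPServer, paramiko.SFTPServerInterface
        )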
+
+ def check_channel_window_change_request(
+ self, channel, width, height, pixelwidth, pixelheight
+ ):
+ """
+ Determine if the pseudo-terminal on the given channel can be resized.
+ This only makes sense if a pty was previously allocated on it.
+
+ The default implementation always returns ``False``.
+
+ :param .Channel channel: the `.Channel` the pty request arrived on.
+ :param int width: width of screen in characters.
+ :param int height: height of screen in characters.
+ :param int pixelwidth:
+ width of screen in pixels, if known (may be ``0`` if unknown).
+ :param int pixelheight:
+ height of screen in pixels, if known (may be ``0`` if unknown).
+ :return: ``True`` if the terminal was resized; ``False`` if not.
+ """
+ return False
+
+ def check_channel_x11_request(
+ self,
+ channel,
+ single_connection,
+ auth_protocol,
+ auth_cookie,
+ screen_number,
+ ):
+ """
+ Determine if the client will be provided with an X11 session. If this
+ method returns ``True``, X11 applications should be routed through new
+ SSH channels, using `.Transport.open_x11_channel`.
+
+ The default implementation always returns ``False``.
+
+ :param .Channel channel: the `.Channel` the X11 request arrived on
+ :param bool single_connection:
+ ``True`` if only a single X11 channel should be opened, else
+ ``False``.
+ :param str auth_protocol: the protocol used for X11 authentication
+ :param str auth_cookie: the cookie used to authenticate to X11
+ :param int screen_number: the number of the X11 screen to connect to
+ :return: ``True`` if the X11 session was opened; ``False`` if not
+ """
+ return False
+
+ def check_channel_forward_agent_request(self, channel):
+ """
+ Determine if the client will be provided with a forward-agent session.
+ If this method returns ``True``, the server will allow SSH Agent
+ forwarding.
+
+ The default implementation always returns ``False``.
+
+ :param .Channel channel: the `.Channel` the request arrived on
+ :return: ``True`` if agent forwarding was allowed; ``False`` if not
+
+ If ``True`` is returned, the server should create an
+ `.AgentServerProxy` to access the agent.
+ """
+ return False
+
+ def check_channel_direct_tcpip_request(self, chanid, origin, destination):
+ """
+ Determine if a local port forwarding channel will be granted, and
+ return ``OPEN_SUCCEEDED`` or an error code. This method is
+ called in server mode when the client requests a channel, after
+ authentication is complete.
+
+ The ``chanid`` parameter is a small number that uniquely identifies the
+ channel within a `.Transport`. A `.Channel` object is not created
+ unless this method returns ``OPEN_SUCCEEDED`` -- once a
+ `.Channel` object is created, you can call `.Channel.get_id` to
+ retrieve the channel ID.
+
+ The origin and destination parameters are (ip_address, port) tuples
+ that correspond to both ends of the TCP connection in the forwarding
+ tunnel.
+
+ The return value should either be ``OPEN_SUCCEEDED`` (or
+ ``0``) to allow the channel request, or one of the following error
+ codes to reject it:
+
+ - ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``
+ - ``OPEN_FAILED_CONNECT_FAILED``
+ - ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE``
+ - ``OPEN_FAILED_RESOURCE_SHORTAGE``
+
+ The default implementation always returns
+ ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``.
+
+ :param int chanid: ID of the channel
+ :param tuple origin:
+ 2-tuple containing the IP address and port of the originator
+ (client side)
+ :param tuple destination:
+ 2-tuple containing the IP address and port of the destination
+ (server side)
+ :return: an `int` success or failure code (listed above)
+ """
+ return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
+
+ def check_channel_env_request(self, channel, name, value):
+ """
+ Check whether a given environment variable can be specified for the
+ given channel. This method should return ``True`` if the server
+ is willing to set the specified environment variable. Note that
+ some environment variables (e.g., PATH) can be exceedingly
+ dangerous, so blindly allowing the client to set the environment
+ is almost certainly not a good idea.
+
+ The default implementation always returns ``False``.
+
+ :param .Channel channel: the `.Channel` the env request arrived on
+ :param str name: name of the environment variable to be set
+ :param str value: value of the environment variable
+ :returns: ``True`` if the variable may be set; ``False`` otherwise
+ """
+ return False
+
+ def get_banner(self):
+ """
+ A pre-login banner to display to the user. The message may span
+ multiple lines separated by CRLF pairs. The language should be given
+ in RFC 3066 style, for example ``en-US``.
+
+ The default implementation always returns ``(None, None)``.
+
+ :returns: A tuple containing the banner and language code.
+
+ .. versionadded:: 2.3
+ """
+ return (None, None)
+
+
+class InteractiveQuery:
+ """
+ A query (set of prompts) for a user during interactive authentication.
+ """
+
+ def __init__(self, name="", instructions="", *prompts):
+ """
+ Create a new interactive query to send to the client. The name and
+ instructions are optional, but are generally displayed to the end
+ user. A list of prompts may be included, or they may be added via
+ the `add_prompt` method.
+
+ :param str name: name of this query
+ :param str instructions:
+ user instructions (usually short) about this query
+ :param str prompts: one or more authentication prompts
+ """
+ self.name = name
+ self.instructions = instructions
+ self.prompts = []
+ for x in prompts:
+ if isinstance(x, str):
+ self.add_prompt(x)
+ else:
+ self.add_prompt(x[0], x[1])
+
+ def add_prompt(self, prompt, echo=True):
+ """
+ Add a prompt to this query. The prompt should be a (reasonably short)
+ string. Multiple prompts can be added to the same query.
+
+ :param str prompt: the user prompt
+ :param bool echo:
+ ``True`` (default) if the user's response should be echoed;
+ ``False`` if not (for a password or similar)
+ """
+ self.prompts.append((prompt, echo))
+
+
+class SubsystemHandler(threading.Thread):
+ """
+ Handler for a subsystem in server mode. If you create a subclass of this
+ class and pass it to `.Transport.set_subsystem_handler`, an object of this
+ class will be created for each request for this subsystem. Each new object
+ will be executed within its own new thread by calling `start_subsystem`.
+ When that method completes, the channel is closed.
+
+ For example, if you made a subclass ``MP3Handler`` and registered it as the
+ handler for subsystem ``"mp3"``, then whenever a client has successfully
+ authenticated and requests subsystem ``"mp3"``, an object of class
+ ``MP3Handler`` will be created, and `start_subsystem` will be called on
+ it from a new thread.
+ """
+
+ def __init__(self, channel, name, server):
+ """
+ Create a new handler for a channel. This is used by `.ServerInterface`
+ to start up a new handler when a channel requests this subsystem. You
+ don't need to override this method, but if you do, be sure to pass the
+ ``channel`` and ``name`` parameters through to the original
+ ``__init__`` method here.
+
+ :param .Channel channel: the channel associated with this
+ subsystem request.
+ :param str name: name of the requested subsystem.
+ :param .ServerInterface server:
+ the server object for the session that started this subsystem
+ """
+ threading.Thread.__init__(self, target=self._run)
+ self.__channel = channel
+ self.__transport = channel.get_transport()
+ self.__name = name
+ self.__server = server
+
+ def get_server(self):
+ """
+ Return the `.ServerInterface` object associated with this channel and
+ subsystem.
+ """
+ return self.__server
+
+ def _run(self):
+ try:
+ self.__transport._log(
+ DEBUG, "Starting handler for subsystem {}".format(self.__name)
+ )
+ self.start_subsystem(self.__name, self.__transport, self.__channel)
+ except Exception as e:
+ self.__transport._log(
+ ERROR,
+ 'Exception in subsystem handler for "{}": {}'.format(
+ self.__name, e
+ ),
+ )
+ self.__transport._log(ERROR, util.tb_strings())
+ try:
+ self.finish_subsystem()
+ except:
+ pass
+
+ def start_subsystem(self, name, transport, channel):
+ """
+ Process an ssh subsystem in server mode. This method is called on a
+ new object (and in a new thread) for each subsystem request. It is
+ assumed that all subsystem logic will take place here, and when the
+ subsystem is finished, this method will return. After this method
+ returns, the channel is closed.
+
+ The combination of ``transport`` and ``channel`` are unique; this
+ handler corresponds to exactly one `.Channel` on one `.Transport`.
+
+ .. note::
+ It is the responsibility of this method to exit if the underlying
+ `.Transport` is closed. This can be done by checking
+ `.Transport.is_active` or noticing an EOF on the `.Channel`. If
+ this method loops forever without checking for this case, your
+ Python interpreter may refuse to exit because this thread will
+ still be running.
+
+ :param str name: name of the requested subsystem.
+ :param .Transport transport: the server-mode `.Transport`.
+ :param .Channel channel: the channel associated with this subsystem
+ request.
+ """
+ pass
+
+ def finish_subsystem(self):
+ """
+ Perform any cleanup at the end of a subsystem. The default
+ implementation just closes the channel.
+
+ .. versionadded:: 1.1
+ """
+ self.__channel.close()
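A sketch of a custom `SubsystemHandler` that echoes bytes back to the client; the ``"echo"`` subsystem name is invented for illustration.

    import paramiko

    class EchoHandler(paramiko.SubsystemHandler):
        # Runs in its own thread; the channel is closed once this returns.
        def start_subsystem(self, name, transport, channel):
            while transport.is_active():
                data = channel.recv(1024)
                if not data:  # EOF from the client
                    break
                channel.send(data)

    # Registration on a server-mode Transport (assumed to exist):
    #     transport.set_subsystem_handler("echo", EchoHandler)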
diff --git a/paramiko/sftp.py b/paramiko/sftp.py
new file mode 100644
index 0000000..b3528d4
--- /dev/null
+++ b/paramiko/sftp.py
@@ -0,0 +1,224 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import select
+import socket
+import struct
+
+from paramiko import util
+from paramiko.common import DEBUG, byte_chr, byte_ord
+from paramiko.message import Message
+
+
+(
+ CMD_INIT,
+ CMD_VERSION,
+ CMD_OPEN,
+ CMD_CLOSE,
+ CMD_READ,
+ CMD_WRITE,
+ CMD_LSTAT,
+ CMD_FSTAT,
+ CMD_SETSTAT,
+ CMD_FSETSTAT,
+ CMD_OPENDIR,
+ CMD_READDIR,
+ CMD_REMOVE,
+ CMD_MKDIR,
+ CMD_RMDIR,
+ CMD_REALPATH,
+ CMD_STAT,
+ CMD_RENAME,
+ CMD_READLINK,
+ CMD_SYMLINK,
+) = range(1, 21)
+(CMD_STATUS, CMD_HANDLE, CMD_DATA, CMD_NAME, CMD_ATTRS) = range(101, 106)
+(CMD_EXTENDED, CMD_EXTENDED_REPLY) = range(200, 202)
+
+SFTP_OK = 0
+(
+ SFTP_EOF,
+ SFTP_NO_SUCH_FILE,
+ SFTP_PERMISSION_DENIED,
+ SFTP_FAILURE,
+ SFTP_BAD_MESSAGE,
+ SFTP_NO_CONNECTION,
+ SFTP_CONNECTION_LOST,
+ SFTP_OP_UNSUPPORTED,
+) = range(1, 9)
+
+SFTP_DESC = [
+ "Success",
+ "End of file",
+ "No such file",
+ "Permission denied",
+ "Failure",
+ "Bad message",
+ "No connection",
+ "Connection lost",
+ "Operation unsupported",
+]
+
+SFTP_FLAG_READ = 0x1
+SFTP_FLAG_WRITE = 0x2
+SFTP_FLAG_APPEND = 0x4
+SFTP_FLAG_CREATE = 0x8
+SFTP_FLAG_TRUNC = 0x10
+SFTP_FLAG_EXCL = 0x20
+
+_VERSION = 3
+
+
+# for debugging
+CMD_NAMES = {
+ CMD_INIT: "init",
+ CMD_VERSION: "version",
+ CMD_OPEN: "open",
+ CMD_CLOSE: "close",
+ CMD_READ: "read",
+ CMD_WRITE: "write",
+ CMD_LSTAT: "lstat",
+ CMD_FSTAT: "fstat",
+ CMD_SETSTAT: "setstat",
+ CMD_FSETSTAT: "fsetstat",
+ CMD_OPENDIR: "opendir",
+ CMD_READDIR: "readdir",
+ CMD_REMOVE: "remove",
+ CMD_MKDIR: "mkdir",
+ CMD_RMDIR: "rmdir",
+ CMD_REALPATH: "realpath",
+ CMD_STAT: "stat",
+ CMD_RENAME: "rename",
+ CMD_READLINK: "readlink",
+ CMD_SYMLINK: "symlink",
+ CMD_STATUS: "status",
+ CMD_HANDLE: "handle",
+ CMD_DATA: "data",
+ CMD_NAME: "name",
+ CMD_ATTRS: "attrs",
+ CMD_EXTENDED: "extended",
+ CMD_EXTENDED_REPLY: "extended_reply",
+}
+
+
+# TODO: rewrite SFTP file/server modules' overly-flexible "make a request with
+# xyz components" so we don't need this very silly method of signaling whether
+# a given Python integer should be 32- or 64-bit.
+# NOTE: this only became an issue when dropping Python 2 support; prior to
+# doing so, we had to support actual-longs, which served as that signal. This
+# is simply recreating that structure in a more tightly scoped fashion.
+class int64(int):
+ pass
+
+
+class SFTPError(Exception):
+ pass
+
+
+class BaseSFTP:
+ def __init__(self):
+ self.logger = util.get_logger("paramiko.sftp")
+ self.sock = None
+ self.ultra_debug = False
+
+ # ...internals...
+
+ def _send_version(self):
+ m = Message()
+ m.add_int(_VERSION)
+ self._send_packet(CMD_INIT, m)
+ t, data = self._read_packet()
+ if t != CMD_VERSION:
+ raise SFTPError("Incompatible sftp protocol")
+ version = struct.unpack(">I", data[:4])[0]
+ # if version != _VERSION:
+ # raise SFTPError('Incompatible sftp protocol')
+ return version
+
+ def _send_server_version(self):
+ # winscp will freak out if the server sends version info before the
+ # client finishes sending INIT.
+ t, data = self._read_packet()
+ if t != CMD_INIT:
+ raise SFTPError("Incompatible sftp protocol")
+ version = struct.unpack(">I", data[:4])[0]
+ # advertise that we support "check-file"
+ extension_pairs = ["check-file", "md5,sha1"]
+ msg = Message()
+ msg.add_int(_VERSION)
+ msg.add(*extension_pairs)
+ self._send_packet(CMD_VERSION, msg)
+ return version
+
+ def _log(self, level, msg, *args):
+ self.logger.log(level, msg, *args)
+
+ def _write_all(self, out):
+ while len(out) > 0:
+ n = self.sock.send(out)
+ if n <= 0:
+ raise EOFError()
+ if n == len(out):
+ return
+ out = out[n:]
+ return
+
+ def _read_all(self, n):
+ out = bytes()
+ while n > 0:
+ if isinstance(self.sock, socket.socket):
+ # sometimes sftp is used directly over a socket instead of
+ # through a paramiko channel. in this case, check periodically
+ # if the socket is closed. (for some reason, recv() won't ever
+ # return or raise an exception, but calling select on a closed
+ # socket will.)
+ while True:
+ read, write, err = select.select([self.sock], [], [], 0.1)
+ if len(read) > 0:
+ x = self.sock.recv(n)
+ break
+ else:
+ x = self.sock.recv(n)
+
+ if len(x) == 0:
+ raise EOFError()
+ out += x
+ n -= len(x)
+ return out
+
+ def _send_packet(self, t, packet):
+ packet = packet.asbytes()
+ out = struct.pack(">I", len(packet) + 1) + byte_chr(t) + packet
+ if self.ultra_debug:
+ self._log(DEBUG, util.format_binary(out, "OUT: "))
+ self._write_all(out)
+
+ def _read_packet(self):
+ x = self._read_all(4)
+ # most sftp servers won't accept packets larger than about 32k, so
+ # anything with the high byte set (> 16MB) is just garbage.
+ if byte_ord(x[0]):
+ raise SFTPError("Garbage packet received")
+ size = struct.unpack(">I", x)[0]
+ data = self._read_all(size)
+ if self.ultra_debug:
+ self._log(DEBUG, util.format_binary(data, "IN: "))
+ if size > 0:
+ t = byte_ord(data[0])
+ return t, data[1:]
+ return 0, bytes()
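A standalone sketch of the wire framing implemented by ``_send_packet``/``_read_packet`` above: a big-endian ``uint32`` length covering one type byte plus the payload.

    import struct

    payload = struct.pack(">I", 3)  # an INIT body: protocol version 3
    packet = struct.pack(">I", len(payload) + 1) + bytes([1]) + payload  # type 1 == CMD_INIT
    assert packet == b"\x00\x00\x00\x05\x01\x00\x00\x00\x03"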
diff --git a/paramiko/sftp_attr.py b/paramiko/sftp_attr.py
new file mode 100644
index 0000000..18ffbf8
--- /dev/null
+++ b/paramiko/sftp_attr.py
@@ -0,0 +1,239 @@
+# Copyright (C) 2003-2006 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import stat
+import time
+from paramiko.common import x80000000, o700, o70, xffffffff
+
+
+class SFTPAttributes:
+ """
+ Representation of the attributes of a file (or proxied file) for SFTP in
+ client or server mode. It attempts to mirror the object returned by
+ `os.stat` as closely as possible, so it may have the following fields,
+ with the same meanings as those returned by an `os.stat` object:
+
+ - ``st_size``
+ - ``st_uid``
+ - ``st_gid``
+ - ``st_mode``
+ - ``st_atime``
+ - ``st_mtime``
+
+ Because the SFTP protocol allows arbitrary extension attributes beyond
+ this standard set, any extras are stored in a dict named ``attr``.
+ Occasionally, the filename is also stored, in ``filename``.
+ """
+
+ FLAG_SIZE = 1
+ FLAG_UIDGID = 2
+ FLAG_PERMISSIONS = 4
+ FLAG_AMTIME = 8
+ FLAG_EXTENDED = x80000000
+
+ def __init__(self):
+ """
+ Create a new (empty) SFTPAttributes object. All fields will be empty.
+ """
+ self._flags = 0
+ self.st_size = None
+ self.st_uid = None
+ self.st_gid = None
+ self.st_mode = None
+ self.st_atime = None
+ self.st_mtime = None
+ self.attr = {}
+
+ @classmethod
+ def from_stat(cls, obj, filename=None):
+ """
+ Create an `.SFTPAttributes` object from an existing ``stat`` object (an
+ object returned by `os.stat`).
+
+ :param object obj: an object returned by `os.stat` (or equivalent).
+ :param str filename: the filename associated with this file.
+ :return: new `.SFTPAttributes` object with the same attribute fields.
+ """
+ attr = cls()
+ attr.st_size = obj.st_size
+ attr.st_uid = obj.st_uid
+ attr.st_gid = obj.st_gid
+ attr.st_mode = obj.st_mode
+ attr.st_atime = obj.st_atime
+ attr.st_mtime = obj.st_mtime
+ if filename is not None:
+ attr.filename = filename
+ return attr
+
+ def __repr__(self):
+ return "<SFTPAttributes: {}>".format(self._debug_str())
+
+ # ...internals...
+ @classmethod
+ def _from_msg(cls, msg, filename=None, longname=None):
+ attr = cls()
+ attr._unpack(msg)
+ if filename is not None:
+ attr.filename = filename
+ if longname is not None:
+ attr.longname = longname
+ return attr
+
+ def _unpack(self, msg):
+ self._flags = msg.get_int()
+ if self._flags & self.FLAG_SIZE:
+ self.st_size = msg.get_int64()
+ if self._flags & self.FLAG_UIDGID:
+ self.st_uid = msg.get_int()
+ self.st_gid = msg.get_int()
+ if self._flags & self.FLAG_PERMISSIONS:
+ self.st_mode = msg.get_int()
+ if self._flags & self.FLAG_AMTIME:
+ self.st_atime = msg.get_int()
+ self.st_mtime = msg.get_int()
+ if self._flags & self.FLAG_EXTENDED:
+ count = msg.get_int()
+ for i in range(count):
+ self.attr[msg.get_string()] = msg.get_string()
+
+ def _pack(self, msg):
+ self._flags = 0
+ if self.st_size is not None:
+ self._flags |= self.FLAG_SIZE
+ if (self.st_uid is not None) and (self.st_gid is not None):
+ self._flags |= self.FLAG_UIDGID
+ if self.st_mode is not None:
+ self._flags |= self.FLAG_PERMISSIONS
+ if (self.st_atime is not None) and (self.st_mtime is not None):
+ self._flags |= self.FLAG_AMTIME
+ if len(self.attr) > 0:
+ self._flags |= self.FLAG_EXTENDED
+ msg.add_int(self._flags)
+ if self._flags & self.FLAG_SIZE:
+ msg.add_int64(self.st_size)
+ if self._flags & self.FLAG_UIDGID:
+ msg.add_int(self.st_uid)
+ msg.add_int(self.st_gid)
+ if self._flags & self.FLAG_PERMISSIONS:
+ msg.add_int(self.st_mode)
+ if self._flags & self.FLAG_AMTIME:
+ # throw away any fractional seconds
+ msg.add_int(int(self.st_atime))
+ msg.add_int(int(self.st_mtime))
+ if self._flags & self.FLAG_EXTENDED:
+ msg.add_int(len(self.attr))
+ for key, val in self.attr.items():
+ msg.add_string(key)
+ msg.add_string(val)
+ return
+
+ def _debug_str(self):
+ out = "[ "
+ if self.st_size is not None:
+ out += "size={} ".format(self.st_size)
+ if (self.st_uid is not None) and (self.st_gid is not None):
+ out += "uid={} gid={} ".format(self.st_uid, self.st_gid)
+ if self.st_mode is not None:
+ out += "mode=" + oct(self.st_mode) + " "
+ if (self.st_atime is not None) and (self.st_mtime is not None):
+ out += "atime={} mtime={} ".format(self.st_atime, self.st_mtime)
+ for k, v in self.attr.items():
+ out += '"{}"={!r} '.format(str(k), v)
+ out += "]"
+ return out
+
+ @staticmethod
+ def _rwx(n, suid, sticky=False):
+ if suid:
+ suid = 2
+ out = "-r"[n >> 2] + "-w"[(n >> 1) & 1]
+ if sticky:
+ out += "-xTt"[suid + (n & 1)]
+ else:
+ out += "-xSs"[suid + (n & 1)]
+ return out
+
+ def __str__(self):
+ """create a unix-style long description of the file (like ls -l)"""
+ if self.st_mode is not None:
+ kind = stat.S_IFMT(self.st_mode)
+ if kind == stat.S_IFIFO:
+ ks = "p"
+ elif kind == stat.S_IFCHR:
+ ks = "c"
+ elif kind == stat.S_IFDIR:
+ ks = "d"
+ elif kind == stat.S_IFBLK:
+ ks = "b"
+ elif kind == stat.S_IFREG:
+ ks = "-"
+ elif kind == stat.S_IFLNK:
+ ks = "l"
+ elif kind == stat.S_IFSOCK:
+ ks = "s"
+ else:
+ ks = "?"
+ ks += self._rwx(
+ (self.st_mode & o700) >> 6, self.st_mode & stat.S_ISUID
+ )
+ ks += self._rwx(
+ (self.st_mode & o70) >> 3, self.st_mode & stat.S_ISGID
+ )
+ ks += self._rwx(
+ self.st_mode & 7, self.st_mode & stat.S_ISVTX, True
+ )
+ else:
+ ks = "?---------"
+ # compute display date
+ if (self.st_mtime is None) or (self.st_mtime == xffffffff):
+ # shouldn't really happen
+ datestr = "(unknown date)"
+ else:
+ time_tuple = time.localtime(self.st_mtime)
+ if abs(time.time() - self.st_mtime) > 15_552_000:
+ # (15,552,000s = 6 months)
+ datestr = time.strftime("%d %b %Y", time_tuple)
+ else:
+ datestr = time.strftime("%d %b %H:%M", time_tuple)
+ filename = getattr(self, "filename", "?")
+
+ # not all servers support uid/gid
+ uid = self.st_uid
+ gid = self.st_gid
+ size = self.st_size
+ if uid is None:
+ uid = 0
+ if gid is None:
+ gid = 0
+ if size is None:
+ size = 0
+
+ # TODO: not sure this actually worked as expected beforehand, leaving
+ # it untouched for the time being, re: .format() upgrade, until someone
+ # has time to doublecheck
+ return "%s 1 %-8d %-8d %8d %-12s %s" % (
+ ks,
+ uid,
+ gid,
+ size,
+ datestr,
+ filename,
+ )
+
+ def asbytes(self):
+ return str(self).encode()
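A small usage sketch for `from_stat` and the ``ls -l``-style ``__str__`` above; the local path is only an example.

    import os
    from paramiko import SFTPAttributes

    attrs = SFTPAttributes.from_stat(os.stat("setup.py"), filename="setup.py")
    print(attrs.st_size, oct(attrs.st_mode))
    print(str(attrs))  # e.g. "-rw-r--r-- 1 1000     1000         3313 12 Jun 2024 setup.py"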
diff --git a/paramiko/sftp_client.py b/paramiko/sftp_client.py
new file mode 100644
index 0000000..066cd83
--- /dev/null
+++ b/paramiko/sftp_client.py
@@ -0,0 +1,965 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of Paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+from binascii import hexlify
+import errno
+import os
+import stat
+import threading
+import time
+import weakref
+from paramiko import util
+from paramiko.channel import Channel
+from paramiko.message import Message
+from paramiko.common import INFO, DEBUG, o777
+from paramiko.sftp import (
+ BaseSFTP,
+ CMD_OPENDIR,
+ CMD_HANDLE,
+ SFTPError,
+ CMD_READDIR,
+ CMD_NAME,
+ CMD_CLOSE,
+ SFTP_FLAG_READ,
+ SFTP_FLAG_WRITE,
+ SFTP_FLAG_CREATE,
+ SFTP_FLAG_TRUNC,
+ SFTP_FLAG_APPEND,
+ SFTP_FLAG_EXCL,
+ CMD_OPEN,
+ CMD_REMOVE,
+ CMD_RENAME,
+ CMD_MKDIR,
+ CMD_RMDIR,
+ CMD_STAT,
+ CMD_ATTRS,
+ CMD_LSTAT,
+ CMD_SYMLINK,
+ CMD_SETSTAT,
+ CMD_READLINK,
+ CMD_REALPATH,
+ CMD_STATUS,
+ CMD_EXTENDED,
+ SFTP_OK,
+ SFTP_EOF,
+ SFTP_NO_SUCH_FILE,
+ SFTP_PERMISSION_DENIED,
+ int64,
+)
+
+from paramiko.sftp_attr import SFTPAttributes
+from paramiko.ssh_exception import SSHException
+from paramiko.sftp_file import SFTPFile
+from paramiko.util import ClosingContextManager, b, u
+
+
+def _to_unicode(s):
+ """
+ decode a string as ascii or utf8 if possible (as required by the sftp
+ protocol). if neither works, just return a byte string because the server
+ probably doesn't know the filename's encoding.
+ """
+ try:
+ return s.encode("ascii")
+ except (UnicodeError, AttributeError):
+ try:
+ return s.decode("utf-8")
+ except UnicodeError:
+ return s
+
+
+b_slash = b"/"
+
+
+class SFTPClient(BaseSFTP, ClosingContextManager):
+ """
+ SFTP client object.
+
+ Used to open an SFTP session across an open SSH `.Transport` and perform
+ remote file operations.
+
+ Instances of this class may be used as context managers.
+ """
+
+ def __init__(self, sock):
+ """
+ Create an SFTP client from an existing `.Channel`. The channel
+ should already have requested the ``"sftp"`` subsystem.
+
+ An alternate way to create an SFTP client context is by using
+ `from_transport`.
+
+ :param .Channel sock: an open `.Channel` using the ``"sftp"`` subsystem
+
+ :raises:
+ `.SSHException` -- if there's an exception while negotiating sftp
+ """
+ BaseSFTP.__init__(self)
+ self.sock = sock
+ self.ultra_debug = False
+ self.request_number = 1
+ # lock for request_number
+ self._lock = threading.Lock()
+ self._cwd = None
+ # request # -> SFTPFile
+ self._expecting = weakref.WeakValueDictionary()
+ if type(sock) is Channel:
+ # override default logger
+ transport = self.sock.get_transport()
+ self.logger = util.get_logger(
+ transport.get_log_channel() + ".sftp"
+ )
+ self.ultra_debug = transport.get_hexdump()
+ try:
+ server_version = self._send_version()
+ except EOFError:
+ raise SSHException("EOF during negotiation")
+ self._log(
+ INFO,
+ "Opened sftp connection (server version {})".format(
+ server_version
+ ),
+ )
+
+ @classmethod
+ def from_transport(cls, t, window_size=None, max_packet_size=None):
+ """
+ Create an SFTP client channel from an open `.Transport`.
+
+ Setting the window and packet sizes might affect the transfer speed.
+ The default settings in the `.Transport` class are the same as in
+ OpenSSH and should work adequately for both files transfers and
+ interactive sessions.
+
+ :param .Transport t: an open `.Transport` which is already
+ authenticated
+ :param int window_size:
+ optional window size for the `.SFTPClient` session.
+ :param int max_packet_size:
+ optional max packet size for the `.SFTPClient` session.
+
+ :return:
+ a new `.SFTPClient` object, referring to an sftp session (channel)
+ across the transport
+
+ .. versionchanged:: 1.15
+ Added the ``window_size`` and ``max_packet_size`` arguments.
+ """
+ chan = t.open_session(
+ window_size=window_size, max_packet_size=max_packet_size
+ )
+ if chan is None:
+ return None
+ chan.invoke_subsystem("sftp")
+ return cls(chan)
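A connection sketch for `from_transport`; the hostname and credentials are placeholders.

    import paramiko

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect("ssh.example.com", username="demo", password="demo-password")

    sftp = paramiko.SFTPClient.from_transport(client.get_transport())
    print(sftp.listdir("."))
    sftp.close()
    client.close()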
+
+ def _log(self, level, msg, *args):
+ if isinstance(msg, list):
+ for m in msg:
+ self._log(level, m, *args)
+ else:
+ # NOTE: these bits MUST continue using %-style format junk because
+ # logging.Logger.log() explicitly requires it. Grump.
+ # escape '%' in msg (they could come from file or directory names)
+ # before logging
+ msg = msg.replace("%", "%%")
+ super()._log(
+ level,
+ "[chan %s] " + msg,
+ *([self.sock.get_name()] + list(args))
+ )
+
+ def close(self):
+ """
+ Close the SFTP session and its underlying channel.
+
+ .. versionadded:: 1.4
+ """
+ self._log(INFO, "sftp session closed.")
+ self.sock.close()
+
+ def get_channel(self):
+ """
+ Return the underlying `.Channel` object for this SFTP session. This
+ might be useful for doing things like setting a timeout on the channel.
+
+ .. versionadded:: 1.7.1
+ """
+ return self.sock
+
+ def listdir(self, path="."):
+ """
+ Return a list containing the names of the entries in the given
+ ``path``.
+
+ The list is in arbitrary order. It does not include the special
+ entries ``'.'`` and ``'..'`` even if they are present in the folder.
+ This method is meant to mirror ``os.listdir`` as closely as possible.
+ For a list of full `.SFTPAttributes` objects, see `listdir_attr`.
+
+ :param str path: path to list (defaults to ``'.'``)
+ """
+ return [f.filename for f in self.listdir_attr(path)]
+
+ def listdir_attr(self, path="."):
+ """
+ Return a list containing `.SFTPAttributes` objects corresponding to
+ files in the given ``path``. The list is in arbitrary order. It does
+ not include the special entries ``'.'`` and ``'..'`` even if they are
+ present in the folder.
+
+ The returned `.SFTPAttributes` objects will each have an additional
+ field: ``longname``, which may contain a formatted string of the file's
+ attributes, in unix format. The content of this string will probably
+ depend on the SFTP server implementation.
+
+ :param str path: path to list (defaults to ``'.'``)
+ :return: list of `.SFTPAttributes` objects
+
+ .. versionadded:: 1.2
+ """
+ path = self._adjust_cwd(path)
+ self._log(DEBUG, "listdir({!r})".format(path))
+ t, msg = self._request(CMD_OPENDIR, path)
+ if t != CMD_HANDLE:
+ raise SFTPError("Expected handle")
+ handle = msg.get_binary()
+ filelist = []
+ while True:
+ try:
+ t, msg = self._request(CMD_READDIR, handle)
+ except EOFError:
+ # done with handle
+ break
+ if t != CMD_NAME:
+ raise SFTPError("Expected name response")
+ count = msg.get_int()
+ for i in range(count):
+ filename = msg.get_text()
+ longname = msg.get_text()
+ attr = SFTPAttributes._from_msg(msg, filename, longname)
+ if (filename != ".") and (filename != ".."):
+ filelist.append(attr)
+ self._request(CMD_CLOSE, handle)
+ return filelist
+
+ def listdir_iter(self, path=".", read_aheads=50):
+ """
+ Generator version of `.listdir_attr`.
+
+ See the API docs for `.listdir_attr` for overall details.
+
+ This function adds one more kwarg on top of `.listdir_attr`:
+ ``read_aheads``, an integer controlling how many
+ ``SSH_FXP_READDIR`` requests are made to the server. The default of 50
+ should suffice for most file listings as each request/response cycle
+ may contain multiple files (dependent on server implementation.)
+
+ .. versionadded:: 1.15
+ """
+ path = self._adjust_cwd(path)
+ self._log(DEBUG, "listdir({!r})".format(path))
+ t, msg = self._request(CMD_OPENDIR, path)
+
+ if t != CMD_HANDLE:
+ raise SFTPError("Expected handle")
+
+ handle = msg.get_string()
+
+ nums = list()
+ while True:
+ try:
+ # Send out a bunch of readdir requests so that we can read the
+ # responses later on Section 6.7 of the SSH file transfer RFC
+ # explains this
+ # http://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt
+ for i in range(read_aheads):
+ num = self._async_request(type(None), CMD_READDIR, handle)
+ nums.append(num)
+
+ # For each of our sent requests
+ # Read and parse the corresponding packets
+ # If we're at the end of our queued requests, then fire off
+ # some more requests
+ # Exit the loop when we've reached the end of the directory
+ # handle
+ for num in nums:
+ t, pkt_data = self._read_packet()
+ msg = Message(pkt_data)
+ new_num = msg.get_int()
+ if num == new_num:
+ if t == CMD_STATUS:
+ self._convert_status(msg)
+ count = msg.get_int()
+ for i in range(count):
+ filename = msg.get_text()
+ longname = msg.get_text()
+ attr = SFTPAttributes._from_msg(
+ msg, filename, longname
+ )
+ if (filename != ".") and (filename != ".."):
+ yield attr
+
+ # If we've hit the end of our queued requests, reset nums.
+ nums = list()
+
+ except EOFError:
+ self._request(CMD_CLOSE, handle)
+ return
+
+ def open(self, filename, mode="r", bufsize=-1):
+ """
+ Open a file on the remote server. The arguments are the same as for
+ Python's built-in `python:file` (aka `python:open`). A file-like
+ object is returned, which closely mimics the behavior of a normal
+ Python file object, including the ability to be used as a context
+ manager.
+
+ The mode indicates how the file is to be opened: ``'r'`` for reading,
+ ``'w'`` for writing (truncating an existing file), ``'a'`` for
+ appending, ``'r+'`` for reading/writing, ``'w+'`` for reading/writing
+ (truncating an existing file), ``'a+'`` for reading/appending. The
+ Python ``'b'`` flag is ignored, since SSH treats all files as binary.
+ The ``'U'`` flag is supported in a compatible way.
+
+ Since 1.5.2, an ``'x'`` flag indicates that the operation should only
+ succeed if the file was created and did not previously exist. This has
+ no direct mapping to Python's file flags, but is commonly known as the
+ ``O_EXCL`` flag in posix.
+
+ The file will be buffered in standard Python style by default, but
+ can be altered with the ``bufsize`` parameter. ``<=0`` turns off
+ buffering, ``1`` uses line buffering, and any number greater than 1
+ (``>1``) uses that specific buffer size.
+
+ :param str filename: name of the file to open
+ :param str mode: mode (Python-style) to open in
+ :param int bufsize: desired buffering (default: ``-1``)
+ :return: an `.SFTPFile` object representing the open file
+
+ :raises: ``IOError`` -- if the file could not be opened.
+ """
+ filename = self._adjust_cwd(filename)
+ self._log(DEBUG, "open({!r}, {!r})".format(filename, mode))
+ imode = 0
+ if ("r" in mode) or ("+" in mode):
+ imode |= SFTP_FLAG_READ
+ if ("w" in mode) or ("+" in mode) or ("a" in mode):
+ imode |= SFTP_FLAG_WRITE
+ if "w" in mode:
+ imode |= SFTP_FLAG_CREATE | SFTP_FLAG_TRUNC
+ if "a" in mode:
+ imode |= SFTP_FLAG_CREATE | SFTP_FLAG_APPEND
+ if "x" in mode:
+ imode |= SFTP_FLAG_CREATE | SFTP_FLAG_EXCL
+ attrblock = SFTPAttributes()
+ t, msg = self._request(CMD_OPEN, filename, imode, attrblock)
+ if t != CMD_HANDLE:
+ raise SFTPError("Expected handle")
+ handle = msg.get_binary()
+ self._log(
+ DEBUG,
+ "open({!r}, {!r}) -> {}".format(
+ filename, mode, u(hexlify(handle))
+ ),
+ )
+ return SFTPFile(self, handle, mode, bufsize)
+
+ # Python continues to vacillate about "open" vs "file"...
+ file = open
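A sketch of `open` in write and read modes, assuming ``sftp`` is an already-connected `SFTPClient`; the remote path is a placeholder.

    remote_path = "/tmp/paramiko-example.txt"

    with sftp.open(remote_path, "w") as f:  # FLAG_WRITE | FLAG_CREATE | FLAG_TRUNC
        f.write(b"hello over sftp\n")

    with sftp.open(remote_path, "r") as f:  # FLAG_READ
        print(f.read().decode())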
+
+ def remove(self, path):
+ """
+ Remove the file at the given path. This only works on files; for
+ removing folders (directories), use `rmdir`.
+
+ :param str path: path (absolute or relative) of the file to remove
+
+ :raises: ``IOError`` -- if the path refers to a folder (directory)
+ """
+ path = self._adjust_cwd(path)
+ self._log(DEBUG, "remove({!r})".format(path))
+ self._request(CMD_REMOVE, path)
+
+ unlink = remove
+
+ def rename(self, oldpath, newpath):
+ """
+ Rename a file or folder from ``oldpath`` to ``newpath``.
+
+ .. note::
+ This method implements 'standard' SFTP ``RENAME`` behavior; those
+ seeking the OpenSSH "POSIX rename" extension behavior should use
+ `posix_rename`.
+
+ :param str oldpath:
+ existing name of the file or folder
+ :param str newpath:
+ new name for the file or folder, must not exist already
+
+ :raises:
+ ``IOError`` -- if ``newpath`` is a folder, or something else goes
+ wrong
+ """
+ oldpath = self._adjust_cwd(oldpath)
+ newpath = self._adjust_cwd(newpath)
+ self._log(DEBUG, "rename({!r}, {!r})".format(oldpath, newpath))
+ self._request(CMD_RENAME, oldpath, newpath)
+
+ def posix_rename(self, oldpath, newpath):
+ """
+ Rename a file or folder from ``oldpath`` to ``newpath``, following
+ posix conventions.
+
+ :param str oldpath: existing name of the file or folder
+ :param str newpath: new name for the file or folder, will be
+ overwritten if it already exists
+
+ :raises:
+ ``IOError`` -- if ``newpath`` is a folder, posix-rename is not
+ supported by the server or something else goes wrong
+
+ .. versionadded:: 2.2
+ """
+ oldpath = self._adjust_cwd(oldpath)
+ newpath = self._adjust_cwd(newpath)
+ self._log(DEBUG, "posix_rename({!r}, {!r})".format(oldpath, newpath))
+ self._request(
+ CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath
+ )
+
+ def mkdir(self, path, mode=o777):
+ """
+ Create a folder (directory) named ``path`` with numeric mode ``mode``.
+ The default mode is 0777 (octal). On some systems, mode is ignored.
+ Where it is used, the current umask value is first masked out.
+
+ :param str path: name of the folder to create
+ :param int mode: permissions (posix-style) for the newly-created folder
+ """
+ path = self._adjust_cwd(path)
+ self._log(DEBUG, "mkdir({!r}, {!r})".format(path, mode))
+ attr = SFTPAttributes()
+ attr.st_mode = mode
+ self._request(CMD_MKDIR, path, attr)
+
+ def rmdir(self, path):
+ """
+ Remove the folder named ``path``.
+
+ :param str path: name of the folder to remove
+ """
+ path = self._adjust_cwd(path)
+ self._log(DEBUG, "rmdir({!r})".format(path))
+ self._request(CMD_RMDIR, path)
+
+ def stat(self, path):
+ """
+ Retrieve information about a file on the remote system. The return
+ value is an object whose attributes correspond to the attributes of
+ Python's ``stat`` structure as returned by ``os.stat``, except that it
+ contains fewer fields. An SFTP server may return as much or as little
+ info as it wants, so the results may vary from server to server.
+
+ Unlike a Python `python:stat` object, the result may not be accessed as
+ a tuple. This is mostly due to the author's slack factor.
+
+ The fields supported are: ``st_mode``, ``st_size``, ``st_uid``,
+ ``st_gid``, ``st_atime``, and ``st_mtime``.
+
+ :param str path: the filename to stat
+ :return:
+ an `.SFTPAttributes` object containing attributes about the given
+ file
+ """
+ path = self._adjust_cwd(path)
+ self._log(DEBUG, "stat({!r})".format(path))
+ t, msg = self._request(CMD_STAT, path)
+ if t != CMD_ATTRS:
+ raise SFTPError("Expected attributes")
+ return SFTPAttributes._from_msg(msg)
+
+ def lstat(self, path):
+ """
+ Retrieve information about a file on the remote system, without
+ following symbolic links (shortcuts). This otherwise behaves exactly
+ the same as `stat`.
+
+ :param str path: the filename to stat
+ :return:
+ an `.SFTPAttributes` object containing attributes about the given
+ file
+ """
+ path = self._adjust_cwd(path)
+ self._log(DEBUG, "lstat({!r})".format(path))
+ t, msg = self._request(CMD_LSTAT, path)
+ if t != CMD_ATTRS:
+ raise SFTPError("Expected attributes")
+ return SFTPAttributes._from_msg(msg)
+
+ def symlink(self, source, dest):
+ """
+ Create a symbolic link to the ``source`` path at ``dest``.
+
+ :param str source: path of the original file
+ :param str dest: path of the newly created symlink
+ """
+ dest = self._adjust_cwd(dest)
+ self._log(DEBUG, "symlink({!r}, {!r})".format(source, dest))
+ source = b(source)
+ self._request(CMD_SYMLINK, source, dest)
+
+ def chmod(self, path, mode):
+ """
+ Change the mode (permissions) of a file. The permissions are
+ unix-style and identical to those used by Python's `os.chmod`
+ function.
+
+ :param str path: path of the file to change the permissions of
+ :param int mode: new permissions
+ """
+ path = self._adjust_cwd(path)
+ self._log(DEBUG, "chmod({!r}, {!r})".format(path, mode))
+ attr = SFTPAttributes()
+ attr.st_mode = mode
+ self._request(CMD_SETSTAT, path, attr)
+
+ def chown(self, path, uid, gid):
+ """
+ Change the owner (``uid``) and group (``gid``) of a file. As with
+ Python's `os.chown` function, you must pass both arguments, so if you
+ only want to change one, use `stat` first to retrieve the current
+ owner and group.
+
+ :param str path: path of the file to change the owner and group of
+ :param int uid: new owner's uid
+ :param int gid: new group id
+ """
+ path = self._adjust_cwd(path)
+ self._log(DEBUG, "chown({!r}, {!r}, {!r})".format(path, uid, gid))
+ attr = SFTPAttributes()
+ attr.st_uid, attr.st_gid = uid, gid
+ self._request(CMD_SETSTAT, path, attr)
+
+ def utime(self, path, times):
+ """
+ Set the access and modified times of the file specified by ``path``.
+ If ``times`` is ``None``, then the file's access and modified times
+ are set to the current time. Otherwise, ``times`` must be a 2-tuple
+ of numbers, of the form ``(atime, mtime)``, which is used to set the
+ access and modified times, respectively. This bizarre API is mimicked
+ from Python for the sake of consistency -- I apologize.
+
+ :param str path: path of the file to modify
+ :param tuple times:
+ ``None`` or a tuple of (access time, modified time) in standard
+ internet epoch time (seconds since 01 January 1970 GMT)
+ """
+ path = self._adjust_cwd(path)
+ if times is None:
+ times = (time.time(), time.time())
+ self._log(DEBUG, "utime({!r}, {!r})".format(path, times))
+ attr = SFTPAttributes()
+ attr.st_atime, attr.st_mtime = times
+ self._request(CMD_SETSTAT, path, attr)
+
+ def truncate(self, path, size):
+ """
+ Change the size of the file specified by ``path``. This usually
+ extends or shrinks the size of the file, just like the `~file.truncate`
+ method on Python file objects.
+
+ :param str path: path of the file to modify
+ :param int size: the new size of the file
+ """
+ path = self._adjust_cwd(path)
+ self._log(DEBUG, "truncate({!r}, {!r})".format(path, size))
+ attr = SFTPAttributes()
+ attr.st_size = size
+ self._request(CMD_SETSTAT, path, attr)
+
+ def readlink(self, path):
+ """
+ Return the target of a symbolic link (shortcut). You can use
+ `symlink` to create these. The result may be either an absolute or
+ relative pathname.
+
+ :param str path: path of the symbolic link file
+ :return: target path, as a `str`
+ """
+ path = self._adjust_cwd(path)
+ self._log(DEBUG, "readlink({!r})".format(path))
+ t, msg = self._request(CMD_READLINK, path)
+ if t != CMD_NAME:
+ raise SFTPError("Expected name response")
+ count = msg.get_int()
+ if count == 0:
+ return None
+ if count != 1:
+ raise SFTPError("Readlink returned {} results".format(count))
+ return _to_unicode(msg.get_string())
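+
+ # Illustrative usage sketch (not part of the library); assumes a connected
+ # client named ``sftp``. Note the argument order: the link is created at
+ # ``dest`` and points at ``source``.
+ #     sftp.symlink("data/current.log", "latest.log")
+ #     sftp.readlink("latest.log")    # -> "data/current.log"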
+
+ def normalize(self, path):
+ """
+ Return the normalized path (on the server) of a given path. This
+ can be used to quickly resolve symbolic links or determine what the
+ server is considering to be the "current folder" (by passing ``'.'``
+ as ``path``).
+
+ :param str path: path to be normalized
+ :return: normalized form of the given path (as a `str`)
+
+ :raises: ``IOError`` -- if the path can't be resolved on the server
+ """
+ path = self._adjust_cwd(path)
+ self._log(DEBUG, "normalize({!r})".format(path))
+ t, msg = self._request(CMD_REALPATH, path)
+ if t != CMD_NAME:
+ raise SFTPError("Expected name response")
+ count = msg.get_int()
+ if count != 1:
+ raise SFTPError("Realpath returned {} results".format(count))
+ return msg.get_text()
+
+ def chdir(self, path=None):
+ """
+ Change the "current directory" of this SFTP session. Since SFTP
+ doesn't really have the concept of a current working directory, this is
+ emulated by Paramiko. Once you use this method to set a working
+ directory, all operations on this `.SFTPClient` object will be relative
+ to that path. You can pass in ``None`` to stop using a current working
+ directory.
+
+ :param str path: new current working directory
+
+ :raises:
+ ``IOError`` -- if the requested path doesn't exist on the server
+
+ .. versionadded:: 1.4
+ """
+ if path is None:
+ self._cwd = None
+ return
+ if not stat.S_ISDIR(self.stat(path).st_mode):
+ code = errno.ENOTDIR
+ raise SFTPError(code, "{}: {}".format(os.strerror(code), path))
+ self._cwd = b(self.normalize(path))
+
+ def getcwd(self):
+ """
+ Return the "current working directory" for this SFTP session, as
+ emulated by Paramiko. If no directory has been set with `chdir`,
+ this method will return ``None``.
+
+ .. versionadded:: 1.4
+ """
+ # TODO: make class initialize with self._cwd set to self.normalize('.')
+ return self._cwd and u(self._cwd)
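+
+ # Illustrative usage sketch (not part of the library): the emulated working
+ # directory affects only this client object, not the server itself.
+ #     sftp.chdir("/var/log")    # resolved via normalize() and remembered
+ #     sftp.getcwd()             # -> "/var/log"
+ #     sftp.stat("syslog")       # adjusted to "/var/log/syslog"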
+
+ def _transfer_with_callback(self, reader, writer, file_size, callback):
+ size = 0
+ while True:
+ data = reader.read(32768)
+ writer.write(data)
+ size += len(data)
+ if len(data) == 0:
+ break
+ if callback is not None:
+ callback(size, file_size)
+ return size
+
+ def putfo(self, fl, remotepath, file_size=0, callback=None, confirm=True):
+ """
+ Copy the contents of an open file object (``fl``) to the SFTP server as
+ ``remotepath``. Any exception raised by operations will be passed
+ through.
+
+ The SFTP operations use pipelining for speed.
+
+ :param fl: opened file or file-like object to copy
+ :param str remotepath: the destination path on the SFTP server
+ :param int file_size:
+ optional size parameter passed to callback. If none is specified,
+ size defaults to 0
+ :param callable callback:
+ optional callback function (form: ``func(int, int)``) that accepts
+ the bytes transferred so far and the total bytes to be transferred
+ (since 1.7.4)
+ :param bool confirm:
+ whether to do a stat() on the file afterwards to confirm the file
+ size (since 1.7.7)
+
+ :return:
+ an `.SFTPAttributes` object containing attributes about the given
+ file.
+
+ .. versionadded:: 1.10
+ """
+ with self.file(remotepath, "wb") as fr:
+ fr.set_pipelined(True)
+ size = self._transfer_with_callback(
+ reader=fl, writer=fr, file_size=file_size, callback=callback
+ )
+ if confirm:
+ s = self.stat(remotepath)
+ if s.st_size != size:
+ raise IOError(
+ "size mismatch in put! {} != {}".format(s.st_size, size)
+ )
+ else:
+ s = SFTPAttributes()
+ return s
+
+ def put(self, localpath, remotepath, callback=None, confirm=True):
+ """
+ Copy a local file (``localpath``) to the SFTP server as ``remotepath``.
+ Any exception raised by operations will be passed through. This
+ method is primarily provided as a convenience.
+
+ The SFTP operations use pipelining for speed.
+
+ :param str localpath: the local file to copy
+ :param str remotepath: the destination path on the SFTP server. Note
+ that the filename should be included. Only specifying a directory
+ may result in an error.
+ :param callable callback:
+ optional callback function (form: ``func(int, int)``) that accepts
+ the bytes transferred so far and the total bytes to be transferred
+ :param bool confirm:
+ whether to do a stat() on the file afterwards to confirm the file
+ size
+
+ :return: an `.SFTPAttributes` object containing attributes about the
+ given file
+
+ .. versionadded:: 1.4
+ .. versionchanged:: 1.7.4
+ ``callback`` and rich attribute return value added.
+ .. versionchanged:: 1.7.7
+ ``confirm`` param added.
+ """
+ file_size = os.stat(localpath).st_size
+ with open(localpath, "rb") as fl:
+ return self.putfo(fl, remotepath, file_size, callback, confirm)
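+
+ # Illustrative usage sketch (not part of the library): a hypothetical progress
+ # callback receives (bytes transferred so far, total bytes).
+ #     def progress(sent, total):
+ #         print("{}/{} bytes".format(sent, total))
+ #     attrs = sftp.put("local.bin", "/tmp/remote.bin", callback=progress)
+ #     # with confirm=True (the default), a trailing stat() verifies the size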
+
+ def getfo(
+ self,
+ remotepath,
+ fl,
+ callback=None,
+ prefetch=True,
+ max_concurrent_prefetch_requests=None,
+ ):
+ """
+ Copy a remote file (``remotepath``) from the SFTP server and write to
+ an open file or file-like object, ``fl``. Any exception raised by
+ operations will be passed through. This method is primarily provided
+ as a convenience.
+
+ :param str remotepath: the remote file to copy
+ :param fl:
+ the opened file or file-like object the remote file is written into
+ :param callable callback:
+ optional callback function (form: ``func(int, int)``) that accepts
+ the bytes transferred so far and the total bytes to be transferred
+ :param bool prefetch:
+ controls whether prefetching is performed (default: True)
+ :param int max_concurrent_prefetch_requests:
+ The maximum number of concurrent read requests to prefetch. See
+ `.SFTPClient.get` (its ``max_concurrent_prefetch_requests`` param)
+ for details.
+ :return: the `number <int>` of bytes written to the opened file object
+
+ .. versionadded:: 1.10
+ .. versionchanged:: 2.8
+ Added the ``prefetch`` keyword argument.
+ .. versionchanged:: 3.3
+ Added ``max_concurrent_prefetch_requests``.
+ """
+ file_size = self.stat(remotepath).st_size
+ with self.open(remotepath, "rb") as fr:
+ if prefetch:
+ fr.prefetch(file_size, max_concurrent_prefetch_requests)
+ return self._transfer_with_callback(
+ reader=fr, writer=fl, file_size=file_size, callback=callback
+ )
+
+ def get(
+ self,
+ remotepath,
+ localpath,
+ callback=None,
+ prefetch=True,
+ max_concurrent_prefetch_requests=None,
+ ):
+ """
+ Copy a remote file (``remotepath``) from the SFTP server to the local
+ host as ``localpath``. Any exception raised by operations will be
+ passed through. This method is primarily provided as a convenience.
+
+ :param str remotepath: the remote file to copy
+ :param str localpath: the destination path on the local host
+ :param callable callback:
+ optional callback function (form: ``func(int, int)``) that accepts
+ the bytes transferred so far and the total bytes to be transferred
+ :param bool prefetch:
+ controls whether prefetching is performed (default: True)
+ :param int max_concurrent_prefetch_requests:
+ The maximum number of concurrent read requests to prefetch.
+ When this is ``None`` (the default), do not limit the number of
+ concurrent prefetch requests. Note: OpenSSH's sftp internally
+ imposes a limit of 64 concurrent requests, while Paramiko imposes
+ no limit by default; consider setting a limit if a file can be
+ successfully received with sftp but hangs with Paramiko.
+
+ .. versionadded:: 1.4
+ .. versionchanged:: 1.7.4
+ Added the ``callback`` param
+ .. versionchanged:: 2.8
+ Added the ``prefetch`` keyword argument.
+ .. versionchanged:: 3.3
+ Added ``max_concurrent_prefetch_requests``.
+ """
+ with open(localpath, "wb") as fl:
+ size = self.getfo(
+ remotepath,
+ fl,
+ callback,
+ prefetch,
+ max_concurrent_prefetch_requests,
+ )
+ s = os.stat(localpath)
+ if s.st_size != size:
+ raise IOError(
+ "size mismatch in get! {} != {}".format(s.st_size, size)
+ )
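+
+ # Illustrative usage sketch (not part of the library): capping prefetch can
+ # help with servers that throttle or limit concurrent read requests.
+ #     sftp.get("/tmp/remote.bin", "local.bin",
+ #              max_concurrent_prefetch_requests=64)
+ #     sftp.get("/tmp/remote.bin", "local.bin", prefetch=False)  # plain reads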
+
+ # ...internals...
+
+ def _request(self, t, *args):
+ num = self._async_request(type(None), t, *args)
+ return self._read_response(num)
+
+ def _async_request(self, fileobj, t, *args):
+ # this method may be called from other threads (prefetch)
+ self._lock.acquire()
+ try:
+ msg = Message()
+ msg.add_int(self.request_number)
+ for item in args:
+ if isinstance(item, int64):
+ msg.add_int64(item)
+ elif isinstance(item, int):
+ msg.add_int(item)
+ elif isinstance(item, SFTPAttributes):
+ item._pack(msg)
+ else:
+ # For all other types, rely on add_string() to either coerce
+ # the value to bytes before writing or raise a suitable exception.
+ msg.add_string(item)
+ num = self.request_number
+ self._expecting[num] = fileobj
+ self.request_number += 1
+ finally:
+ self._lock.release()
+ self._send_packet(t, msg)
+ return num
+
+ def _read_response(self, waitfor=None):
+ while True:
+ try:
+ t, data = self._read_packet()
+ except EOFError as e:
+ raise SSHException("Server connection dropped: {}".format(e))
+ msg = Message(data)
+ num = msg.get_int()
+ self._lock.acquire()
+ try:
+ if num not in self._expecting:
+ # might be response for a file that was closed before
+ # responses came back
+ self._log(DEBUG, "Unexpected response #{}".format(num))
+ if waitfor is None:
+ # just doing a single check
+ break
+ continue
+ fileobj = self._expecting[num]
+ del self._expecting[num]
+ finally:
+ self._lock.release()
+ if num == waitfor:
+ # synchronous
+ if t == CMD_STATUS:
+ self._convert_status(msg)
+ return t, msg
+
+ # cannot rewrite this to satisfy E721, either as a None check
+ # or as an isinstance() check against NoneType
+ if fileobj is not type(None): # noqa
+ fileobj._async_response(t, msg, num)
+ if waitfor is None:
+ # just doing a single check
+ break
+ return None, None
+
+ def _finish_responses(self, fileobj):
+ while fileobj in self._expecting.values():
+ self._read_response()
+ fileobj._check_exception()
+
+ def _convert_status(self, msg):
+ """
+ Raises EOFError or IOError on error status; otherwise does nothing.
+ """
+ code = msg.get_int()
+ text = msg.get_text()
+ if code == SFTP_OK:
+ return
+ elif code == SFTP_EOF:
+ raise EOFError(text)
+ elif code == SFTP_NO_SUCH_FILE:
+ # clever idea from john a. meinel: map the error codes to errno
+ raise IOError(errno.ENOENT, text)
+ elif code == SFTP_PERMISSION_DENIED:
+ raise IOError(errno.EACCES, text)
+ else:
+ raise IOError(text)
+
+ def _adjust_cwd(self, path):
+ """
+ Return an adjusted path if we're emulating a "current working
+ directory" for the server.
+ """
+ path = b(path)
+ if self._cwd is None:
+ return path
+ if len(path) and path[0:1] == b_slash:
+ # absolute path
+ return path
+ if self._cwd == b_slash:
+ return self._cwd + path
+ return self._cwd + b_slash + path
+
+
+class SFTP(SFTPClient):
+ """
+ An alias for `.SFTPClient` for backwards compatibility.
+ """
+
+ pass
diff --git a/paramiko/sftp_file.py b/paramiko/sftp_file.py
new file mode 100644
index 0000000..c74695e
--- /dev/null
+++ b/paramiko/sftp_file.py
@@ -0,0 +1,594 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+SFTP file object
+"""
+
+
+from binascii import hexlify
+from collections import deque
+import socket
+import threading
+import time
+from paramiko.common import DEBUG, io_sleep
+
+from paramiko.file import BufferedFile
+from paramiko.util import u
+from paramiko.sftp import (
+ CMD_CLOSE,
+ CMD_READ,
+ CMD_DATA,
+ SFTPError,
+ CMD_WRITE,
+ CMD_STATUS,
+ CMD_FSTAT,
+ CMD_ATTRS,
+ CMD_FSETSTAT,
+ CMD_EXTENDED,
+ int64,
+)
+from paramiko.sftp_attr import SFTPAttributes
+
+
+class SFTPFile(BufferedFile):
+ """
+ Proxy object for a file on the remote server, in client mode SFTP.
+
+ Instances of this class may be used as context managers in the same way
+ that built-in Python file objects are.
+ """
+
+ # Some sftp servers will choke if you send read/write requests larger than
+ # this size.
+ MAX_REQUEST_SIZE = 32768
+
+ def __init__(self, sftp, handle, mode="r", bufsize=-1):
+ BufferedFile.__init__(self)
+ self.sftp = sftp
+ self.handle = handle
+ BufferedFile._set_mode(self, mode, bufsize)
+ self.pipelined = False
+ self._prefetching = False
+ self._prefetch_done = False
+ self._prefetch_data = {}
+ self._prefetch_extents = {}
+ self._prefetch_lock = threading.Lock()
+ self._saved_exception = None
+ self._reqs = deque()
+
+ def __del__(self):
+ self._close(async_=True)
+
+ def close(self):
+ """
+ Close the file.
+ """
+ self._close(async_=False)
+
+ def _close(self, async_=False):
+ # We allow double-close without signaling an error, because real
+ # Python file objects do. However, we must protect against actually
+ # sending multiple CMD_CLOSE packets, because after we close our
+ # handle, the same handle may be re-allocated by the server, and we
+ # may end up mysteriously closing some random other file. (This is
+ # especially important because we unconditionally call close() from
+ # __del__.)
+ if self._closed:
+ return
+ self.sftp._log(DEBUG, "close({})".format(u(hexlify(self.handle))))
+ if self.pipelined:
+ self.sftp._finish_responses(self)
+ BufferedFile.close(self)
+ try:
+ if async_:
+ # GC'd file handle could be called from an arbitrary thread
+ # -- don't wait for a response
+ self.sftp._async_request(type(None), CMD_CLOSE, self.handle)
+ else:
+ self.sftp._request(CMD_CLOSE, self.handle)
+ except EOFError:
+ # may have outlived the Transport connection
+ pass
+ except (IOError, socket.error):
+ # may have outlived the Transport connection
+ pass
+
+ def _data_in_prefetch_requests(self, offset, size):
+ k = [
+ x for x in list(self._prefetch_extents.values()) if x[0] <= offset
+ ]
+ if len(k) == 0:
+ return False
+ k.sort(key=lambda x: x[0])
+ buf_offset, buf_size = k[-1]
+ if buf_offset + buf_size <= offset:
+ # prefetch request ends before this one begins
+ return False
+ if buf_offset + buf_size >= offset + size:
+ # inclusive
+ return True
+ # well, we have part of the request. see if another chunk has
+ # the rest.
+ return self._data_in_prefetch_requests(
+ buf_offset + buf_size, offset + size - buf_offset - buf_size
+ )
+
+ def _data_in_prefetch_buffers(self, offset):
+ """
+ if a block of data is present in the prefetch buffers, at the given
+ offset, return the offset of the relevant prefetch buffer. otherwise,
+ return None. this guarantees nothing about the number of bytes
+ collected in the prefetch buffer so far.
+ """
+ k = [i for i in self._prefetch_data.keys() if i <= offset]
+ if len(k) == 0:
+ return None
+ index = max(k)
+ buf_offset = offset - index
+ if buf_offset >= len(self._prefetch_data[index]):
+ # it's not here
+ return None
+ return index
+
+ def _read_prefetch(self, size):
+ """
+ read data out of the prefetch buffer, if possible. if the data isn't
+ in the buffer, return None. otherwise, behaves like a normal read.
+ """
+ # while not closed, and haven't fetched past the current position,
+ # and haven't reached EOF...
+ while True:
+ offset = self._data_in_prefetch_buffers(self._realpos)
+ if offset is not None:
+ break
+ if self._prefetch_done or self._closed:
+ break
+ self.sftp._read_response()
+ self._check_exception()
+ if offset is None:
+ self._prefetching = False
+ return None
+ prefetch = self._prefetch_data[offset]
+ del self._prefetch_data[offset]
+
+ buf_offset = self._realpos - offset
+ if buf_offset > 0:
+ self._prefetch_data[offset] = prefetch[:buf_offset]
+ prefetch = prefetch[buf_offset:]
+ if size < len(prefetch):
+ self._prefetch_data[self._realpos + size] = prefetch[size:]
+ prefetch = prefetch[:size]
+ return prefetch
+
+ def _read(self, size):
+ size = min(size, self.MAX_REQUEST_SIZE)
+ if self._prefetching:
+ data = self._read_prefetch(size)
+ if data is not None:
+ return data
+ t, msg = self.sftp._request(
+ CMD_READ, self.handle, int64(self._realpos), int(size)
+ )
+ if t != CMD_DATA:
+ raise SFTPError("Expected data")
+ return msg.get_string()
+
+ def _write(self, data):
+ # may write less than requested if it would exceed max packet size
+ chunk = min(len(data), self.MAX_REQUEST_SIZE)
+ sftp_async_request = self.sftp._async_request(
+ type(None),
+ CMD_WRITE,
+ self.handle,
+ int64(self._realpos),
+ data[:chunk],
+ )
+ self._reqs.append(sftp_async_request)
+ if not self.pipelined or (
+ len(self._reqs) > 100 and self.sftp.sock.recv_ready()
+ ):
+ while len(self._reqs):
+ req = self._reqs.popleft()
+ t, msg = self.sftp._read_response(req)
+ if t != CMD_STATUS:
+ raise SFTPError("Expected status")
+ # convert_status already called
+ return chunk
+
+ def settimeout(self, timeout):
+ """
+ Set a timeout on read/write operations on the underlying socket or
+ ssh `.Channel`.
+
+ :param float timeout:
+ seconds to wait for a pending read/write operation before raising
+ ``socket.timeout``, or ``None`` for no timeout
+
+ .. seealso:: `.Channel.settimeout`
+ """
+ self.sftp.sock.settimeout(timeout)
+
+ def gettimeout(self):
+ """
+ Returns the timeout in seconds (as a `float`) associated with the
+ socket or ssh `.Channel` used for this file.
+
+ .. seealso:: `.Channel.gettimeout`
+ """
+ return self.sftp.sock.gettimeout()
+
+ def setblocking(self, blocking):
+ """
+ Set blocking or non-blocking mode on the underlying socket or ssh
+ `.Channel`.
+
+ :param int blocking:
+ 0 to set non-blocking mode; non-0 to set blocking mode.
+
+ .. seealso:: `.Channel.setblocking`
+ """
+ self.sftp.sock.setblocking(blocking)
+
+ def seekable(self):
+ """
+ Check if the file supports random access.
+
+ :return:
+ `True` if the file supports random access. If `False`,
+ :meth:`seek` will raise an exception
+ """
+ return True
+
+ def seek(self, offset, whence=0):
+ """
+ Set the file's current position.
+
+ See `file.seek` for details.
+ """
+ self.flush()
+ if whence == self.SEEK_SET:
+ self._realpos = self._pos = offset
+ elif whence == self.SEEK_CUR:
+ self._pos += offset
+ self._realpos = self._pos
+ else:
+ self._realpos = self._pos = self._get_size() + offset
+ self._rbuffer = bytes()
+
+ def stat(self):
+ """
+ Retrieve information about this file from the remote system. This is
+ exactly like `.SFTPClient.stat`, except that it operates on an
+ already-open file.
+
+ :returns:
+ an `.SFTPAttributes` object containing attributes about this file.
+ """
+ t, msg = self.sftp._request(CMD_FSTAT, self.handle)
+ if t != CMD_ATTRS:
+ raise SFTPError("Expected attributes")
+ return SFTPAttributes._from_msg(msg)
+
+ def chmod(self, mode):
+ """
+ Change the mode (permissions) of this file. The permissions are
+ unix-style and identical to those used by Python's `os.chmod`
+ function.
+
+ :param int mode: new permissions
+ """
+ self.sftp._log(
+ DEBUG, "chmod({}, {!r})".format(hexlify(self.handle), mode)
+ )
+ attr = SFTPAttributes()
+ attr.st_mode = mode
+ self.sftp._request(CMD_FSETSTAT, self.handle, attr)
+
+ def chown(self, uid, gid):
+ """
+ Change the owner (``uid``) and group (``gid``) of this file. As with
+ Python's `os.chown` function, you must pass both arguments, so if you
+ only want to change one, use `stat` first to retrieve the current
+ owner and group.
+
+ :param int uid: new owner's uid
+ :param int gid: new group id
+ """
+ self.sftp._log(
+ DEBUG,
+ "chown({}, {!r}, {!r})".format(hexlify(self.handle), uid, gid),
+ )
+ attr = SFTPAttributes()
+ attr.st_uid, attr.st_gid = uid, gid
+ self.sftp._request(CMD_FSETSTAT, self.handle, attr)
+
+ def utime(self, times):
+ """
+ Set the access and modified times of this file. If
+ ``times`` is ``None``, then the file's access and modified times are
+ set to the current time. Otherwise, ``times`` must be a 2-tuple of
+ numbers, of the form ``(atime, mtime)``, which is used to set the
+ access and modified times, respectively. This bizarre API is mimicked
+ from Python for the sake of consistency -- I apologize.
+
+ :param tuple times:
+ ``None`` or a tuple of (access time, modified time) in standard
+ internet epoch time (seconds since 01 January 1970 GMT)
+ """
+ if times is None:
+ times = (time.time(), time.time())
+ self.sftp._log(
+ DEBUG, "utime({}, {!r})".format(hexlify(self.handle), times)
+ )
+ attr = SFTPAttributes()
+ attr.st_atime, attr.st_mtime = times
+ self.sftp._request(CMD_FSETSTAT, self.handle, attr)
+
+ def truncate(self, size):
+ """
+ Change the size of this file. This usually extends
+ or shrinks the size of the file, just like the ``truncate()`` method on
+ Python file objects.
+
+ :param size: the new size of the file
+ """
+ self.sftp._log(
+ DEBUG, "truncate({}, {!r})".format(hexlify(self.handle), size)
+ )
+ attr = SFTPAttributes()
+ attr.st_size = size
+ self.sftp._request(CMD_FSETSTAT, self.handle, attr)
+
+ def check(self, hash_algorithm, offset=0, length=0, block_size=0):
+ """
+ Ask the server for a hash of a section of this file. This can be used
+ to verify a successful upload or download, or for various rsync-like
+ operations.
+
+ The file is hashed from ``offset``, for ``length`` bytes.
+ If ``length`` is 0, the remainder of the file is hashed. Thus, if both
+ ``offset`` and ``length`` are zero, the entire file is hashed.
+
+ Normally, ``block_size`` will be 0 (the default), and this method will
+ return a byte string representing the requested hash (for example, a
+ string of length 16 for MD5, or 20 for SHA-1). If a non-zero
+ ``block_size`` is given, each chunk of the file (from ``offset`` to
+ ``offset + length``) of ``block_size`` bytes is computed as a separate
+ hash. The hash results are all concatenated and returned as a single
+ string.
+
+ For example, ``check('sha1', 0, 1024, 512)`` will return a string of
+ length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes
+ of the file, and the last 20 bytes will be the SHA-1 of the next 512
+ bytes.
+
+ :param str hash_algorithm:
+ the name of the hash algorithm to use (normally ``"sha1"`` or
+ ``"md5"``)
+ :param offset:
+ offset into the file to begin hashing (0 means to start from the
+ beginning)
+ :param length:
+ number of bytes to hash (0 means continue to the end of the file)
+ :param int block_size:
+ number of bytes to hash per result (must not be less than 256; 0
+ means to compute only one hash of the entire segment)
+ :return:
+ `str` of bytes representing the hash of each block, concatenated
+ together
+
+ :raises:
+ ``IOError`` -- if the server doesn't support the "check-file"
+ extension, or possibly doesn't support the hash algorithm requested
+
+ .. note:: Many (most?) servers don't support this extension yet.
+
+ .. versionadded:: 1.4
+ """
+ t, msg = self.sftp._request(
+ CMD_EXTENDED,
+ "check-file",
+ self.handle,
+ hash_algorithm,
+ int64(offset),
+ int64(length),
+ block_size,
+ )
+ msg.get_text() # ext
+ msg.get_text() # alg
+ data = msg.get_remainder()
+ return data
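+
+ # Illustrative usage sketch (not part of the library); raises IOError if the
+ # server lacks the "check-file" extension.
+ #     with sftp.open("remote.bin", "rb") as f:
+ #         digest = f.check("sha1")    # one SHA-1 hash over the entire file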
+
+ def set_pipelined(self, pipelined=True):
+ """
+ Turn on/off the pipelining of write operations to this file. When
+ pipelining is on, paramiko won't wait for the server response after
+ each write operation; instead, responses are collected as they
+ come in. At the first non-write operation (including `.close`), all remaining
+ server responses are collected. This means that if there was an error
+ with one of your later writes, an exception might be thrown from within
+ `.close` instead of `.write`.
+
+ By default, files are not pipelined.
+
+ :param bool pipelined:
+ ``True`` if pipelining should be turned on for this file; ``False``
+ otherwise
+
+ .. versionadded:: 1.5
+ """
+ self.pipelined = pipelined
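+
+ # Illustrative usage sketch (not part of the library): with pipelining on,
+ # a write error may only surface at the next non-write call (e.g. close()).
+ # ``blocks`` is a hypothetical iterable of byte strings.
+ #     with sftp.open("remote.log", "wb") as f:
+ #         f.set_pipelined(True)
+ #         for block in blocks:
+ #             f.write(block)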
+
+ def prefetch(self, file_size=None, max_concurrent_requests=None):
+ """
+ Pre-fetch the remaining contents of this file in anticipation of future
+ `.read` calls. If reading the entire file, pre-fetching can
+ dramatically improve the download speed by avoiding roundtrip latency.
+ The file's contents are incrementally buffered in a background thread.
+
+ The prefetched data is stored in a buffer until read via the `.read`
+ method. Once data has been read, it's removed from the buffer. The
+ data may be read in a random order (using `.seek`); chunks of the
+ buffer that haven't been read will continue to be buffered.
+
+ :param int file_size:
+ When this is ``None`` (the default), this method calls `stat` to
+ determine the remote file size. In some situations, doing so can
+ cause exceptions or hangs (see `#562
+ <https://github.com/paramiko/paramiko/pull/562>`_); as a
+ workaround, one may call `stat` explicitly and pass its value in
+ via this parameter.
+ :param int max_concurrent_requests:
+ The maximum number of concurrent read requests to prefetch. See
+ `.SFTPClient.get` (its ``max_concurrent_prefetch_requests`` param)
+ for details.
+
+ .. versionadded:: 1.5.1
+ .. versionchanged:: 1.16.0
+ The ``file_size`` parameter was added (with no default value).
+ .. versionchanged:: 1.16.1
+ The ``file_size`` parameter was made optional for backwards
+ compatibility.
+ .. versionchanged:: 3.3
+ Added ``max_concurrent_requests``.
+ """
+ if file_size is None:
+ file_size = self.stat().st_size
+
+ # queue up async reads for the rest of the file
+ chunks = []
+ n = self._realpos
+ while n < file_size:
+ chunk = min(self.MAX_REQUEST_SIZE, file_size - n)
+ chunks.append((n, chunk))
+ n += chunk
+ if len(chunks) > 0:
+ self._start_prefetch(chunks, max_concurrent_requests)
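+
+ # Illustrative usage sketch (not part of the library): stat once and reuse
+ # the size, avoiding the implicit stat() described above.
+ #     size = sftp.stat("remote.bin").st_size
+ #     with sftp.open("remote.bin", "rb") as f:
+ #         f.prefetch(file_size=size, max_concurrent_requests=64)
+ #         data = f.read()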
+
+ def readv(self, chunks, max_concurrent_prefetch_requests=None):
+ """
+ Read a set of blocks from the file by (offset, length). This is more
+ efficient than doing a series of `.seek` and `.read` calls, since the
+ prefetch machinery is used to retrieve all the requested blocks at
+ once.
+
+ :param chunks:
+ a list of ``(offset, length)`` tuples indicating which sections of
+ the file to read
+ :param int max_concurrent_prefetch_requests:
+ The maximum number of concurrent read requests to prefetch. See
+ `.SFTPClient.get` (its ``max_concurrent_prefetch_requests`` param)
+ for details.
+ :return: a generator yielding the blocks read, in the same order as in ``chunks``
+
+ .. versionadded:: 1.5.4
+ .. versionchanged:: 3.3
+ Added ``max_concurrent_prefetch_requests``.
+ """
+ self.sftp._log(
+ DEBUG, "readv({}, {!r})".format(hexlify(self.handle), chunks)
+ )
+
+ read_chunks = []
+ for offset, size in chunks:
+ # don't fetch data that's already in the prefetch buffer
+ if self._data_in_prefetch_buffers(
+ offset
+ ) or self._data_in_prefetch_requests(offset, size):
+ continue
+
+ # break up anything larger than the max read size
+ while size > 0:
+ chunk_size = min(size, self.MAX_REQUEST_SIZE)
+ read_chunks.append((offset, chunk_size))
+ offset += chunk_size
+ size -= chunk_size
+
+ self._start_prefetch(read_chunks, max_concurrent_prefetch_requests)
+ # now we can just devolve to a bunch of read()s :)
+ for x in chunks:
+ self.seek(x[0])
+ yield self.read(x[1])
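+
+ # Illustrative usage sketch (not part of the library): readv() yields its
+ # results, so iterate (or call list()) to pull the blocks.
+ #     with sftp.open("remote.bin", "rb") as f:
+ #         header, tail = list(f.readv([(0, 512), (1024 * 1024, 512)]))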
+
+ # ...internals...
+
+ def _get_size(self):
+ try:
+ return self.stat().st_size
+ except:
+ return 0
+
+ def _start_prefetch(self, chunks, max_concurrent_requests=None):
+ self._prefetching = True
+ self._prefetch_done = False
+
+ t = threading.Thread(
+ target=self._prefetch_thread,
+ args=(chunks, max_concurrent_requests),
+ )
+ t.daemon = True
+ t.start()
+
+ def _prefetch_thread(self, chunks, max_concurrent_requests):
+ # issue these read requests in a temporary thread because there may be
+ # a lot of them, and sending them all may block.
+ for offset, length in chunks:
+ # Limit the number of concurrent requests in a busy-loop
+ if max_concurrent_requests is not None:
+ while True:
+ with self._prefetch_lock:
+ pf_len = len(self._prefetch_extents)
+ if pf_len < max_concurrent_requests:
+ break
+ time.sleep(io_sleep)
+
+ num = self.sftp._async_request(
+ self, CMD_READ, self.handle, int64(offset), int(length)
+ )
+ with self._prefetch_lock:
+ self._prefetch_extents[num] = (offset, length)
+
+ def _async_response(self, t, msg, num):
+ if t == CMD_STATUS:
+ # save exception and re-raise it on next file operation
+ try:
+ self.sftp._convert_status(msg)
+ except Exception as e:
+ self._saved_exception = e
+ return
+ if t != CMD_DATA:
+ raise SFTPError("Expected data")
+ data = msg.get_string()
+ while True:
+ with self._prefetch_lock:
+ # spin if in race with _prefetch_thread
+ if num in self._prefetch_extents:
+ offset, length = self._prefetch_extents[num]
+ self._prefetch_data[offset] = data
+ del self._prefetch_extents[num]
+ if len(self._prefetch_extents) == 0:
+ self._prefetch_done = True
+ break
+
+ def _check_exception(self):
+ """if there's a saved exception, raise & clear it"""
+ if self._saved_exception is not None:
+ x = self._saved_exception
+ self._saved_exception = None
+ raise x
diff --git a/paramiko/sftp_handle.py b/paramiko/sftp_handle.py
new file mode 100644
index 0000000..b204652
--- /dev/null
+++ b/paramiko/sftp_handle.py
@@ -0,0 +1,196 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Abstraction of an SFTP file handle (for server mode).
+"""
+
+import os
+from paramiko.sftp import SFTP_OP_UNSUPPORTED, SFTP_OK
+from paramiko.util import ClosingContextManager
+
+
+class SFTPHandle(ClosingContextManager):
+ """
+ Abstract object representing a handle to an open file (or folder) in an
+ SFTP server implementation. Each handle has a string representation used
+ by the client to refer to the underlying file.
+
+ Server implementations can (and should) subclass SFTPHandle to implement
+ features of a file handle, like `stat` or `chattr`.
+
+ Instances of this class may be used as context managers.
+ """
+
+ def __init__(self, flags=0):
+ """
+ Create a new file handle representing a local file being served over
+ SFTP. If ``flags`` is passed in, it's used to determine if the file
+ is open in append mode.
+
+ :param int flags: optional flags as passed to
+ `.SFTPServerInterface.open`
+ """
+ self.__flags = flags
+ self.__name = None
+ # only for handles to folders:
+ self.__files = {}
+ self.__tell = None
+
+ def close(self):
+ """
+ When a client closes a file, this method is called on the handle.
+ Normally you would use this method to close the underlying OS level
+ file object(s).
+
+ The default implementation checks for attributes on ``self`` named
+ ``readfile`` and/or ``writefile``, and if either or both are present,
+ their ``close()`` methods are called. This means that if you are
+ using the default implementations of `read` and `write`, this
+ method's default implementation should be fine also.
+ """
+ readfile = getattr(self, "readfile", None)
+ if readfile is not None:
+ readfile.close()
+ writefile = getattr(self, "writefile", None)
+ if writefile is not None:
+ writefile.close()
+
+ def read(self, offset, length):
+ """
+ Read up to ``length`` bytes from this file, starting at position
+ ``offset``. The offset may exceed 32 bits, since SFTP allows offsets
+ to be 64 bits.
+
+ If the end of the file has been reached, this method may return an
+ empty string to signify EOF, or it may return ``SFTP_EOF``.
+
+ The default implementation checks for an attribute on ``self`` named
+ ``readfile``, and if present, performs the read operation on the Python
+ file-like object found there. (This is meant as a time saver for the
+ common case where you are wrapping a Python file object.)
+
+ :param offset: position in the file to start reading from.
+ :param int length: number of bytes to attempt to read.
+ :return: the `bytes` read, or an error code `int`.
+ """
+ readfile = getattr(self, "readfile", None)
+ if readfile is None:
+ return SFTP_OP_UNSUPPORTED
+ try:
+ if self.__tell is None:
+ self.__tell = readfile.tell()
+ if offset != self.__tell:
+ readfile.seek(offset)
+ self.__tell = offset
+ data = readfile.read(length)
+ except IOError as e:
+ self.__tell = None
+ return SFTPServer.convert_errno(e.errno)
+ self.__tell += len(data)
+ return data
+
+ def write(self, offset, data):
+ """
+ Write ``data`` into this file at position ``offset``. Extending the
+ file past its original end is expected. Unlike Python's normal
+ ``write()`` methods, this method cannot do a partial write: it must
+ write all of ``data`` or else return an error.
+
+ The default implementation checks for an attribute on ``self`` named
+ ``writefile``, and if present, performs the write operation on the
+ Python file-like object found there. The attribute is named
+ differently from ``readfile`` to make it easy to implement read-only
+ (or write-only) files, but if both attributes are present, they should
+ refer to the same file.
+
+ :param offset: position in the file to start writing at.
+ :param bytes data: data to write into the file.
+ :return: an SFTP error code like ``SFTP_OK``.
+ """
+ writefile = getattr(self, "writefile", None)
+ if writefile is None:
+ return SFTP_OP_UNSUPPORTED
+ try:
+ # in append mode, don't care about seeking
+ if (self.__flags & os.O_APPEND) == 0:
+ if self.__tell is None:
+ self.__tell = writefile.tell()
+ if offset != self.__tell:
+ writefile.seek(offset)
+ self.__tell = offset
+ writefile.write(data)
+ writefile.flush()
+ except IOError as e:
+ self.__tell = None
+ return SFTPServer.convert_errno(e.errno)
+ if self.__tell is not None:
+ self.__tell += len(data)
+ return SFTP_OK
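+
+ # Illustrative subclass sketch (not part of the library): assigning
+ # ``readfile``/``writefile`` lets the default read()/write()/close()
+ # implementations above do all the work. ``LocalFileHandle`` is a
+ # hypothetical name.
+ #     class LocalFileHandle(SFTPHandle):
+ #         def __init__(self, fileobj, flags=0):
+ #             super().__init__(flags)
+ #             self.readfile = fileobj
+ #             self.writefile = fileobj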
+
+ def stat(self):
+ """
+ Return an `.SFTPAttributes` object referring to this open file, or an
+ error code. This is equivalent to `.SFTPServerInterface.stat`, except
+ it's called on an open file instead of a path.
+
+ :return:
+ an attributes object for the given file, or an SFTP error code
+ (like ``SFTP_PERMISSION_DENIED``).
+ :rtype: `.SFTPAttributes` or error code
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def chattr(self, attr):
+ """
+ Change the attributes of this file. The ``attr`` object will contain
+ only those fields provided by the client in its request, so you should
+ check for the presence of fields before using them.
+
+ :param .SFTPAttributes attr: the attributes to change on this file.
+ :return: an `int` error code like ``SFTP_OK``.
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ # ...internals...
+
+ def _set_files(self, files):
+ """
+ Used by the SFTP server code to cache a directory listing. (In
+ the SFTP protocol, listing a directory is a multi-stage process
+ requiring a temporary handle.)
+ """
+ self.__files = files
+
+ def _get_next_files(self):
+ """
+ Used by the SFTP server code to retrieve a cached directory
+ listing.
+ """
+ fnlist = self.__files[:16]
+ self.__files = self.__files[16:]
+ return fnlist
+
+ def _get_name(self):
+ return self.__name
+
+ def _set_name(self, name):
+ self.__name = name
+
+
+from paramiko.sftp_server import SFTPServer
diff --git a/paramiko/sftp_server.py b/paramiko/sftp_server.py
new file mode 100644
index 0000000..cd3910d
--- /dev/null
+++ b/paramiko/sftp_server.py
@@ -0,0 +1,537 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Server-mode SFTP support.
+"""
+
+import os
+import errno
+import sys
+from hashlib import md5, sha1
+
+from paramiko import util
+from paramiko.sftp import (
+ BaseSFTP,
+ Message,
+ SFTP_FAILURE,
+ SFTP_PERMISSION_DENIED,
+ SFTP_NO_SUCH_FILE,
+ int64,
+)
+from paramiko.sftp_si import SFTPServerInterface
+from paramiko.sftp_attr import SFTPAttributes
+from paramiko.common import DEBUG
+from paramiko.server import SubsystemHandler
+from paramiko.util import b
+
+
+# known hash algorithms for the "check-file" extension
+from paramiko.sftp import (
+ CMD_HANDLE,
+ SFTP_DESC,
+ CMD_STATUS,
+ SFTP_EOF,
+ CMD_NAME,
+ SFTP_BAD_MESSAGE,
+ CMD_EXTENDED_REPLY,
+ SFTP_FLAG_READ,
+ SFTP_FLAG_WRITE,
+ SFTP_FLAG_APPEND,
+ SFTP_FLAG_CREATE,
+ SFTP_FLAG_TRUNC,
+ SFTP_FLAG_EXCL,
+ CMD_NAMES,
+ CMD_OPEN,
+ CMD_CLOSE,
+ SFTP_OK,
+ CMD_READ,
+ CMD_DATA,
+ CMD_WRITE,
+ CMD_REMOVE,
+ CMD_RENAME,
+ CMD_MKDIR,
+ CMD_RMDIR,
+ CMD_OPENDIR,
+ CMD_READDIR,
+ CMD_STAT,
+ CMD_ATTRS,
+ CMD_LSTAT,
+ CMD_FSTAT,
+ CMD_SETSTAT,
+ CMD_FSETSTAT,
+ CMD_READLINK,
+ CMD_SYMLINK,
+ CMD_REALPATH,
+ CMD_EXTENDED,
+ SFTP_OP_UNSUPPORTED,
+)
+
+_hash_class = {"sha1": sha1, "md5": md5}
+
+
+class SFTPServer(BaseSFTP, SubsystemHandler):
+ """
+ Server-side SFTP subsystem support. Since this is a `.SubsystemHandler`,
+ it can be (and is meant to be) set as the handler for ``"sftp"`` requests.
+ Use `.Transport.set_subsystem_handler` to activate this class.
+ """
+
+ def __init__(
+ self,
+ channel,
+ name,
+ server,
+ sftp_si=SFTPServerInterface,
+ *args,
+ **kwargs
+ ):
+ """
+ The constructor for SFTPServer is meant to be called from within the
+ `.Transport` as a subsystem handler. ``server`` and any additional
+ parameters or keyword parameters are passed from the original call to
+ `.Transport.set_subsystem_handler`.
+
+ :param .Channel channel: channel passed from the `.Transport`.
+ :param str name: name of the requested subsystem.
+ :param .ServerInterface server:
+ the server object associated with this channel and subsystem
+ :param sftp_si:
+ a subclass of `.SFTPServerInterface` to use for handling individual
+ requests.
+ """
+ BaseSFTP.__init__(self)
+ SubsystemHandler.__init__(self, channel, name, server)
+ transport = channel.get_transport()
+ self.logger = util.get_logger(transport.get_log_channel() + ".sftp")
+ self.ultra_debug = transport.get_hexdump()
+ self.next_handle = 1
+ # map of handle-string to SFTPHandle for files & folders:
+ self.file_table = {}
+ self.folder_table = {}
+ self.server = sftp_si(server, *args, **kwargs)
+
+ def _log(self, level, msg):
+ if issubclass(type(msg), list):
+ for m in msg:
+ super()._log(level, "[chan " + self.sock.get_name() + "] " + m)
+ else:
+ super()._log(level, "[chan " + self.sock.get_name() + "] " + msg)
+
+ def start_subsystem(self, name, transport, channel):
+ self.sock = channel
+ self._log(DEBUG, "Started sftp server on channel {!r}".format(channel))
+ self._send_server_version()
+ self.server.session_started()
+ while True:
+ try:
+ t, data = self._read_packet()
+ except EOFError:
+ self._log(DEBUG, "EOF -- end of session")
+ return
+ except Exception as e:
+ self._log(DEBUG, "Exception on channel: " + str(e))
+ self._log(DEBUG, util.tb_strings())
+ return
+ msg = Message(data)
+ request_number = msg.get_int()
+ try:
+ self._process(t, request_number, msg)
+ except Exception as e:
+ self._log(DEBUG, "Exception in server processing: " + str(e))
+ self._log(DEBUG, util.tb_strings())
+ # send some kind of failure message, at least
+ try:
+ self._send_status(request_number, SFTP_FAILURE)
+ except:
+ pass
+
+ def finish_subsystem(self):
+ self.server.session_ended()
+ super().finish_subsystem()
+ # close any file handles that were left open
+ # (so we can return them to the OS quickly)
+ for f in self.file_table.values():
+ f.close()
+ for f in self.folder_table.values():
+ f.close()
+ self.file_table = {}
+ self.folder_table = {}
+
+ @staticmethod
+ def convert_errno(e):
+ """
+ Convert an errno value (as from an ``OSError`` or ``IOError``) into a
+ standard SFTP result code. This is a convenience function for trapping
+ exceptions in server code and returning an appropriate result.
+
+ :param int e: an errno code, as from ``OSError.errno``.
+ :return: an `int` SFTP error code like ``SFTP_NO_SUCH_FILE``.
+ """
+ if e == errno.EACCES:
+ # permission denied
+ return SFTP_PERMISSION_DENIED
+ elif (e == errno.ENOENT) or (e == errno.ENOTDIR):
+ # no such file
+ return SFTP_NO_SUCH_FILE
+ else:
+ return SFTP_FAILURE
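+
+ # Illustrative usage sketch (not part of the library): trapping OSError in a
+ # server-side method and mapping it to an SFTP code; ``_realpath()`` is a
+ # hypothetical helper on the interface subclass.
+ #     def remove(self, path):
+ #         try:
+ #             os.remove(self._realpath(path))
+ #         except OSError as e:
+ #             return SFTPServer.convert_errno(e.errno)
+ #         return SFTP_OK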
+
+ @staticmethod
+ def set_file_attr(filename, attr):
+ """
+ Change a file's attributes on the local filesystem. The contents of
+ ``attr`` are used to change the permissions, owner, group ownership,
+ and/or modification & access time of the file, depending on which
+ attributes are present in ``attr``.
+
+ This is meant to be a handy helper function for translating SFTP file
+ requests into local file operations.
+
+ :param str filename:
+ name of the file to alter (should usually be an absolute path).
+ :param .SFTPAttributes attr: attributes to change.
+ """
+ if sys.platform != "win32":
+ # mode operations are meaningless on win32
+ if attr._flags & attr.FLAG_PERMISSIONS:
+ os.chmod(filename, attr.st_mode)
+ if attr._flags & attr.FLAG_UIDGID:
+ os.chown(filename, attr.st_uid, attr.st_gid)
+ if attr._flags & attr.FLAG_AMTIME:
+ os.utime(filename, (attr.st_atime, attr.st_mtime))
+ if attr._flags & attr.FLAG_SIZE:
+ with open(filename, "w+") as f:
+ f.truncate(attr.st_size)
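+
+ # Illustrative usage sketch (not part of the library): a typical chattr()
+ # implementation in an `.SFTPServerInterface` subclass, again using a
+ # hypothetical ``_realpath()`` helper.
+ #     def chattr(self, path, attr):
+ #         try:
+ #             SFTPServer.set_file_attr(self._realpath(path), attr)
+ #         except OSError as e:
+ #             return SFTPServer.convert_errno(e.errno)
+ #         return SFTP_OK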
+
+ # ...internals...
+
+ def _response(self, request_number, t, *args):
+ msg = Message()
+ msg.add_int(request_number)
+ for item in args:
+ # NOTE: this is a very silly tiny class used for SFTPFile mostly
+ if isinstance(item, int64):
+ msg.add_int64(item)
+ elif isinstance(item, int):
+ msg.add_int(item)
+ elif isinstance(item, (str, bytes)):
+ msg.add_string(item)
+ elif type(item) is SFTPAttributes:
+ item._pack(msg)
+ else:
+ raise Exception(
+ "unknown type for {!r} type {!r}".format(item, type(item))
+ )
+ self._send_packet(t, msg)
+
+ def _send_handle_response(self, request_number, handle, folder=False):
+ if not issubclass(type(handle), SFTPHandle):
+ # must be error code
+ self._send_status(request_number, handle)
+ return
+ handle._set_name(b("hx{:d}".format(self.next_handle)))
+ self.next_handle += 1
+ if folder:
+ self.folder_table[handle._get_name()] = handle
+ else:
+ self.file_table[handle._get_name()] = handle
+ self._response(request_number, CMD_HANDLE, handle._get_name())
+
+ def _send_status(self, request_number, code, desc=None):
+ if desc is None:
+ try:
+ desc = SFTP_DESC[code]
+ except IndexError:
+ desc = "Unknown"
+ # some clients expect a "language" tag at the end
+ # (but don't mind it being blank)
+ self._response(request_number, CMD_STATUS, code, desc, "")
+
+ def _open_folder(self, request_number, path):
+ resp = self.server.list_folder(path)
+ if issubclass(type(resp), list):
+ # got an actual list of filenames in the folder
+ folder = SFTPHandle()
+ folder._set_files(resp)
+ self._send_handle_response(request_number, folder, True)
+ return
+ # must be an error code
+ self._send_status(request_number, resp)
+
+ def _read_folder(self, request_number, folder):
+ flist = folder._get_next_files()
+ if len(flist) == 0:
+ self._send_status(request_number, SFTP_EOF)
+ return
+ msg = Message()
+ msg.add_int(request_number)
+ msg.add_int(len(flist))
+ for attr in flist:
+ msg.add_string(attr.filename)
+ msg.add_string(attr)
+ attr._pack(msg)
+ self._send_packet(CMD_NAME, msg)
+
+ def _check_file(self, request_number, msg):
+ # this extension actually comes from v6 protocol, but since it's an
+ # extension, I feel like we can reasonably support it backported.
+ # It's very useful for verifying uploaded files or checking for
+ # rsync-like differences between local and remote files.
+ handle = msg.get_binary()
+ alg_list = msg.get_list()
+ start = msg.get_int64()
+ length = msg.get_int64()
+ block_size = msg.get_int()
+ if handle not in self.file_table:
+ self._send_status(
+ request_number, SFTP_BAD_MESSAGE, "Invalid handle"
+ )
+ return
+ f = self.file_table[handle]
+ for x in alg_list:
+ if x in _hash_class:
+ algname = x
+ alg = _hash_class[x]
+ break
+ else:
+ self._send_status(
+ request_number, SFTP_FAILURE, "No supported hash types found"
+ )
+ return
+ if length == 0:
+ st = f.stat()
+ if not issubclass(type(st), SFTPAttributes):
+ self._send_status(request_number, st, "Unable to stat file")
+ return
+ length = st.st_size - start
+ if block_size == 0:
+ block_size = length
+ if block_size < 256:
+ self._send_status(
+ request_number, SFTP_FAILURE, "Block size too small"
+ )
+ return
+
+ sum_out = bytes()
+ offset = start
+ while offset < start + length:
+ blocklen = min(block_size, start + length - offset)
+ # don't try to read more than about 64KB at a time
+ chunklen = min(blocklen, 65536)
+ count = 0
+ hash_obj = alg()
+ while count < blocklen:
+ data = f.read(offset, chunklen)
+ if not isinstance(data, bytes):
+ self._send_status(
+ request_number, data, "Unable to hash file"
+ )
+ return
+ hash_obj.update(data)
+ count += len(data)
+ offset += count
+ sum_out += hash_obj.digest()
+
+ msg = Message()
+ msg.add_int(request_number)
+ msg.add_string("check-file")
+ msg.add_string(algname)
+ msg.add_bytes(sum_out)
+ self._send_packet(CMD_EXTENDED_REPLY, msg)
+
+ def _convert_pflags(self, pflags):
+ """convert SFTP-style open() flags to Python's os.open() flags"""
+ if (pflags & SFTP_FLAG_READ) and (pflags & SFTP_FLAG_WRITE):
+ flags = os.O_RDWR
+ elif pflags & SFTP_FLAG_WRITE:
+ flags = os.O_WRONLY
+ else:
+ flags = os.O_RDONLY
+ if pflags & SFTP_FLAG_APPEND:
+ flags |= os.O_APPEND
+ if pflags & SFTP_FLAG_CREATE:
+ flags |= os.O_CREAT
+ if pflags & SFTP_FLAG_TRUNC:
+ flags |= os.O_TRUNC
+ if pflags & SFTP_FLAG_EXCL:
+ flags |= os.O_EXCL
+ return flags
+
+ def _process(self, t, request_number, msg):
+ self._log(DEBUG, "Request: {}".format(CMD_NAMES[t]))
+ if t == CMD_OPEN:
+ path = msg.get_text()
+ flags = self._convert_pflags(msg.get_int())
+ attr = SFTPAttributes._from_msg(msg)
+ self._send_handle_response(
+ request_number, self.server.open(path, flags, attr)
+ )
+ elif t == CMD_CLOSE:
+ handle = msg.get_binary()
+ if handle in self.folder_table:
+ del self.folder_table[handle]
+ self._send_status(request_number, SFTP_OK)
+ return
+ if handle in self.file_table:
+ self.file_table[handle].close()
+ del self.file_table[handle]
+ self._send_status(request_number, SFTP_OK)
+ return
+ self._send_status(
+ request_number, SFTP_BAD_MESSAGE, "Invalid handle"
+ )
+ elif t == CMD_READ:
+ handle = msg.get_binary()
+ offset = msg.get_int64()
+ length = msg.get_int()
+ if handle not in self.file_table:
+ self._send_status(
+ request_number, SFTP_BAD_MESSAGE, "Invalid handle"
+ )
+ return
+ data = self.file_table[handle].read(offset, length)
+ if isinstance(data, (bytes, str)):
+ if len(data) == 0:
+ self._send_status(request_number, SFTP_EOF)
+ else:
+ self._response(request_number, CMD_DATA, data)
+ else:
+ self._send_status(request_number, data)
+ elif t == CMD_WRITE:
+ handle = msg.get_binary()
+ offset = msg.get_int64()
+ data = msg.get_binary()
+ if handle not in self.file_table:
+ self._send_status(
+ request_number, SFTP_BAD_MESSAGE, "Invalid handle"
+ )
+ return
+ self._send_status(
+ request_number, self.file_table[handle].write(offset, data)
+ )
+ elif t == CMD_REMOVE:
+ path = msg.get_text()
+ self._send_status(request_number, self.server.remove(path))
+ elif t == CMD_RENAME:
+ oldpath = msg.get_text()
+ newpath = msg.get_text()
+ self._send_status(
+ request_number, self.server.rename(oldpath, newpath)
+ )
+ elif t == CMD_MKDIR:
+ path = msg.get_text()
+ attr = SFTPAttributes._from_msg(msg)
+ self._send_status(request_number, self.server.mkdir(path, attr))
+ elif t == CMD_RMDIR:
+ path = msg.get_text()
+ self._send_status(request_number, self.server.rmdir(path))
+ elif t == CMD_OPENDIR:
+ path = msg.get_text()
+ self._open_folder(request_number, path)
+ return
+ elif t == CMD_READDIR:
+ handle = msg.get_binary()
+ if handle not in self.folder_table:
+ self._send_status(
+ request_number, SFTP_BAD_MESSAGE, "Invalid handle"
+ )
+ return
+ folder = self.folder_table[handle]
+ self._read_folder(request_number, folder)
+ elif t == CMD_STAT:
+ path = msg.get_text()
+ resp = self.server.stat(path)
+ if issubclass(type(resp), SFTPAttributes):
+ self._response(request_number, CMD_ATTRS, resp)
+ else:
+ self._send_status(request_number, resp)
+ elif t == CMD_LSTAT:
+ path = msg.get_text()
+ resp = self.server.lstat(path)
+ if issubclass(type(resp), SFTPAttributes):
+ self._response(request_number, CMD_ATTRS, resp)
+ else:
+ self._send_status(request_number, resp)
+ elif t == CMD_FSTAT:
+ handle = msg.get_binary()
+ if handle not in self.file_table:
+ self._send_status(
+ request_number, SFTP_BAD_MESSAGE, "Invalid handle"
+ )
+ return
+ resp = self.file_table[handle].stat()
+ if issubclass(type(resp), SFTPAttributes):
+ self._response(request_number, CMD_ATTRS, resp)
+ else:
+ self._send_status(request_number, resp)
+ elif t == CMD_SETSTAT:
+ path = msg.get_text()
+ attr = SFTPAttributes._from_msg(msg)
+ self._send_status(request_number, self.server.chattr(path, attr))
+ elif t == CMD_FSETSTAT:
+ handle = msg.get_binary()
+ attr = SFTPAttributes._from_msg(msg)
+ if handle not in self.file_table:
+ self._response(
+ request_number, SFTP_BAD_MESSAGE, "Invalid handle"
+ )
+ return
+ self._send_status(
+ request_number, self.file_table[handle].chattr(attr)
+ )
+ elif t == CMD_READLINK:
+ path = msg.get_text()
+ resp = self.server.readlink(path)
+ if isinstance(resp, (bytes, str)):
+ self._response(
+ request_number, CMD_NAME, 1, resp, "", SFTPAttributes()
+ )
+ else:
+ self._send_status(request_number, resp)
+ elif t == CMD_SYMLINK:
+ # the sftp 2 draft is incorrect here!
+ # path always follows target_path
+ target_path = msg.get_text()
+ path = msg.get_text()
+ self._send_status(
+ request_number, self.server.symlink(target_path, path)
+ )
+ elif t == CMD_REALPATH:
+ path = msg.get_text()
+ rpath = self.server.canonicalize(path)
+ self._response(
+ request_number, CMD_NAME, 1, rpath, "", SFTPAttributes()
+ )
+ elif t == CMD_EXTENDED:
+ tag = msg.get_text()
+ if tag == "check-file":
+ self._check_file(request_number, msg)
+ elif tag == "posix-rename@openssh.com":
+ oldpath = msg.get_text()
+ newpath = msg.get_text()
+ self._send_status(
+ request_number, self.server.posix_rename(oldpath, newpath)
+ )
+ else:
+ self._send_status(request_number, SFTP_OP_UNSUPPORTED)
+ else:
+ self._send_status(request_number, SFTP_OP_UNSUPPORTED)
+
+
+from paramiko.sftp_handle import SFTPHandle
diff --git a/paramiko/sftp_si.py b/paramiko/sftp_si.py
new file mode 100644
index 0000000..72b5db9
--- /dev/null
+++ b/paramiko/sftp_si.py
@@ -0,0 +1,316 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+An interface to override for SFTP server support.
+"""
+
+import os
+import sys
+from paramiko.sftp import SFTP_OP_UNSUPPORTED
+
+
+class SFTPServerInterface:
+ """
+ This class defines an interface for controlling the behavior of paramiko
+ when using the `.SFTPServer` subsystem to provide an SFTP server.
+
+ Methods on this class are called from the SFTP session's thread, so you can
+ block as long as necessary without affecting other sessions (even other
+ SFTP sessions). However, raising an exception will usually cause the SFTP
+ session to abruptly end, so you will usually want to catch exceptions and
+ return an appropriate error code.
+
+ All paths are in string form instead of unicode because not all SFTP
+ clients & servers obey the requirement that paths be encoded in UTF-8.
+ """
+
+ def __init__(self, server, *args, **kwargs):
+ """
+ Create a new SFTPServerInterface object. This method does nothing by
+ default and is meant to be overridden by subclasses.
+
+ :param .ServerInterface server:
+ the server object associated with this channel and SFTP subsystem
+ """
+ super().__init__(*args, **kwargs)
+
+ def session_started(self):
+ """
+ The SFTP server session has just started. This method is meant to be
+ overridden to perform any necessary setup before handling callbacks
+ from SFTP operations.
+ """
+ pass
+
+ def session_ended(self):
+ """
+ The SFTP server session has just ended, either cleanly or via an
+ exception. This method is meant to be overridden to perform any
+ necessary cleanup before this `.SFTPServerInterface` object is
+ destroyed.
+ """
+ pass
+
+ def open(self, path, flags, attr):
+ """
+ Open a file on the server and create a handle for future operations
+ on that file. On success, a new object subclassed from `.SFTPHandle`
+ should be returned. This handle will be used for future operations
+ on the file (read, write, etc). On failure, an error code such as
+ ``SFTP_PERMISSION_DENIED`` should be returned.
+
+ ``flags`` contains the requested mode for opening (read-only,
+ write-append, etc) as a bitset of flags from the ``os`` module:
+
+ - ``os.O_RDONLY``
+ - ``os.O_WRONLY``
+ - ``os.O_RDWR``
+ - ``os.O_APPEND``
+ - ``os.O_CREAT``
+ - ``os.O_TRUNC``
+ - ``os.O_EXCL``
+
+ (One of ``os.O_RDONLY``, ``os.O_WRONLY``, or ``os.O_RDWR`` will always
+ be set.)
+
+ The ``attr`` object contains requested attributes of the file if it
+ has to be created. Some or all attribute fields may be missing if
+ the client didn't specify them.
+
+ .. note:: The SFTP protocol defines all files to be in "binary" mode.
+ There is no equivalent to Python's "text" mode.
+
+ :param str path:
+ the requested path (relative or absolute) of the file to be opened.
+ :param int flags:
+ flags or'd together from the ``os`` module indicating the requested
+ mode for opening the file.
+ :param .SFTPAttributes attr:
+ requested attributes of the file if it is newly created.
+ :return: a new `.SFTPHandle` or error code.
+ """
+ return SFTP_OP_UNSUPPORTED
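+
+ # Illustrative implementation sketch (not part of the library): map the flag
+ # bits to a Python file object and hang it on an SFTPHandle via its
+ # ``readfile``/``writefile`` attributes. Assumes ``SFTPServer`` and
+ # ``SFTPHandle`` are imported here; ``_realpath()`` is a hypothetical helper;
+ # the mode mapping below ignores CREAT/APPEND/EXCL for brevity.
+ #     def open(self, path, flags, attr):
+ #         mode = "wb" if (flags & (os.O_WRONLY | os.O_RDWR)) else "rb"
+ #         try:
+ #             f = open(self._realpath(path), mode)
+ #         except OSError as e:
+ #             return SFTPServer.convert_errno(e.errno)
+ #         handle = SFTPHandle(flags)
+ #         if f.readable():
+ #             handle.readfile = f
+ #         if f.writable():
+ #             handle.writefile = f
+ #         return handle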
+
+ def list_folder(self, path):
+ """
+ Return a list of files within a given folder. The ``path`` will use
+ posix notation (``"/"`` separates folder names) and may be an absolute
+ or relative path.
+
+ The list of files is expected to be a list of `.SFTPAttributes`
+ objects, which are similar in structure to the objects returned by
+ ``os.stat``. In addition, each object should have its ``filename``
+ field filled in, since this is important to a directory listing and
+ not normally present in ``os.stat`` results. The method
+ `.SFTPAttributes.from_stat` will usually do what you want.
+
+ In case of an error, you should return one of the ``SFTP_*`` error
+ codes, such as ``SFTP_PERMISSION_DENIED``.
+
+ :param str path: the requested path (relative or absolute) to be
+ listed.
+ :return:
+ a list of the files in the given folder, using `.SFTPAttributes`
+ objects.
+
+ .. note::
+ You should normalize the given ``path`` first (see the `os.path`
+ module) and check appropriate permissions before returning the list
+ of files. Be careful of malicious clients attempting to use
+ relative paths to escape restricted folders, if you're doing a
+ direct translation from the SFTP server path to your local
+ filesystem.
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def stat(self, path):
+ """
+ Return an `.SFTPAttributes` object for a path on the server, or an
+ error code. If your server supports symbolic links (also known as
+ "aliases"), you should follow them. (`lstat` is the corresponding
+ call that doesn't follow symlinks/aliases.)
+
+ :param str path:
+ the requested path (relative or absolute) to fetch file statistics
+ for.
+ :return:
+ an `.SFTPAttributes` object for the given file, or an SFTP error
+ code (like ``SFTP_PERMISSION_DENIED``).
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def lstat(self, path):
+ """
+ Return an `.SFTPAttributes` object for a path on the server, or an
+ error code. If your server supports symbolic links (also known as
+ "aliases"), you should not follow them -- instead, you should
+ return data on the symlink or alias itself. (`stat` is the
+ corresponding call that follows symlinks/aliases.)
+
+ :param str path:
+ the requested path (relative or absolute) to fetch file statistics
+ for.
+ :type path: str
+ :return:
+ an `.SFTPAttributes` object for the given file, or an SFTP error
+ code (like ``SFTP_PERMISSION_DENIED``).
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def remove(self, path):
+ """
+ Delete a file, if possible.
+
+ :param str path:
+ the requested path (relative or absolute) of the file to delete.
+ :return: an SFTP error code `int` like ``SFTP_OK``.
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def rename(self, oldpath, newpath):
+ """
+ Rename (or move) a file. The SFTP specification implies that this
+ method can be used to move an existing file into a different folder,
+ and since there's no other (easy) way to move files via SFTP, it's
+ probably a good idea to implement "move" in this method too, even for
+ files that cross disk partition boundaries, if at all possible.
+
+ .. note:: You should return an error if a file with the same name as
+ ``newpath`` already exists. (The rename operation should be
+            non-destructive.)
+
+ .. note::
+ This method implements 'standard' SFTP ``RENAME`` behavior; those
+ seeking the OpenSSH "POSIX rename" extension behavior should use
+ `posix_rename`.
+
+ :param str oldpath:
+ the requested path (relative or absolute) of the existing file.
+ :param str newpath: the requested new path of the file.
+ :return: an SFTP error code `int` like ``SFTP_OK``.
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def posix_rename(self, oldpath, newpath):
+ """
+ Rename (or move) a file, following posix conventions. If newpath
+ already exists, it will be overwritten.
+
+ :param str oldpath:
+ the requested path (relative or absolute) of the existing file.
+ :param str newpath: the requested new path of the file.
+ :return: an SFTP error code `int` like ``SFTP_OK``.
+
+        .. versionadded:: 2.2
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def mkdir(self, path, attr):
+ """
+ Create a new directory with the given attributes. The ``attr``
+ object may be considered a "hint" and ignored.
+
+ The ``attr`` object will contain only those fields provided by the
+ client in its request, so you should use ``hasattr`` to check for
+ the presence of fields before using them. In some cases, the ``attr``
+ object may be completely empty.
+
+ :param str path:
+ requested path (relative or absolute) of the new folder.
+ :param .SFTPAttributes attr: requested attributes of the new folder.
+ :return: an SFTP error code `int` like ``SFTP_OK``.
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def rmdir(self, path):
+ """
+ Remove a directory if it exists. The ``path`` should refer to an
+ existing, empty folder -- otherwise this method should return an
+ error.
+
+ :param str path:
+ requested path (relative or absolute) of the folder to remove.
+ :return: an SFTP error code `int` like ``SFTP_OK``.
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def chattr(self, path, attr):
+ """
+ Change the attributes of a file. The ``attr`` object will contain
+ only those fields provided by the client in its request, so you
+ should check for the presence of fields before using them.
+
+ :param str path:
+ requested path (relative or absolute) of the file to change.
+ :param attr:
+ requested attributes to change on the file (an `.SFTPAttributes`
+ object)
+ :return: an error code `int` like ``SFTP_OK``.
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def canonicalize(self, path):
+ """
+ Return the canonical form of a path on the server. For example,
+ if the server's home folder is ``/home/foo``, the path
+ ``"../betty"`` would be canonicalized to ``"/home/betty"``. Note
+ the obvious security issues: if you're serving files only from a
+ specific folder, you probably don't want this method to reveal path
+ names outside that folder.
+
+ You may find the Python methods in ``os.path`` useful, especially
+ ``os.path.normpath`` and ``os.path.realpath``.
+
+ The default implementation returns ``os.path.normpath('/' + path)``.
+ """
+ if os.path.isabs(path):
+ out = os.path.normpath(path)
+ else:
+ out = os.path.normpath("/" + path)
+ if sys.platform == "win32":
+ # on windows, normalize backslashes to sftp/posix format
+ out = out.replace("\\", "/")
+ return out
+
+ def readlink(self, path):
+ """
+ Return the target of a symbolic link (or shortcut) on the server.
+ If the specified path doesn't refer to a symbolic link, an error
+ should be returned.
+
+ :param str path: path (relative or absolute) of the symbolic link.
+ :return:
+ the target `str` path of the symbolic link, or an error code like
+ ``SFTP_NO_SUCH_FILE``.
+ """
+ return SFTP_OP_UNSUPPORTED
+
+ def symlink(self, target_path, path):
+ """
+ Create a symbolic link on the server, as new pathname ``path``,
+ with ``target_path`` as the target of the link.
+
+ :param str target_path:
+ path (relative or absolute) of the target for this new symbolic
+ link.
+ :param str path:
+ path (relative or absolute) of the symbolic link to create.
+ :return: an error code `int` like ``SFTP_OK``.
+ """
+ return SFTP_OP_UNSUPPORTED
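+
+
+# The class below is a deliberately small, illustrative sketch of the
+# interface documented above: a read-only server publishing files beneath a
+# local directory. The class name and the ``root`` default are placeholders
+# chosen for this example rather than part of the paramiko API; a real
+# subclass would add permission checks and more thorough error handling.
+class _ExampleReadOnlySFTPServer(SFTPServerInterface):
+    def __init__(self, server, root="/srv/sftp", *args, **kwargs):
+        super().__init__(server, *args, **kwargs)
+        self.root = root
+
+    def _local(self, path):
+        # Map an SFTP path onto the local filesystem, using canonicalize()
+        # to collapse ".." components before joining with the root.
+        return self.root + self.canonicalize(path)
+
+    def stat(self, path):
+        from paramiko.sftp import SFTP_NO_SUCH_FILE
+        from paramiko.sftp_attr import SFTPAttributes
+
+        try:
+            return SFTPAttributes.from_stat(os.stat(self._local(path)))
+        except OSError:
+            return SFTP_NO_SUCH_FILE
+
+    def list_folder(self, path):
+        from paramiko.sftp import SFTP_NO_SUCH_FILE
+        from paramiko.sftp_attr import SFTPAttributes
+
+        try:
+            folder = self._local(path)
+            out = []
+            for name in os.listdir(folder):
+                attr = SFTPAttributes.from_stat(
+                    os.stat(os.path.join(folder, name))
+                )
+                attr.filename = name
+                out.append(attr)
+            return out
+        except OSError:
+            return SFTP_NO_SUCH_FILE
+
+    def open(self, path, flags, attr):
+        from paramiko.sftp import SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED
+        from paramiko.sftp_handle import SFTPHandle
+
+        if flags & (os.O_WRONLY | os.O_RDWR):
+            # Read-only server: refuse any request for write access.
+            return SFTP_PERMISSION_DENIED
+        try:
+            fd = os.open(self._local(path), os.O_RDONLY)
+        except OSError:
+            return SFTP_NO_SUCH_FILE
+        handle = SFTPHandle(flags)
+        # SFTPHandle's default read() implementation reads from ``readfile``.
+        handle.readfile = os.fdopen(fd, "rb")
+        return handle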
diff --git a/paramiko/ssh_exception.py b/paramiko/ssh_exception.py
new file mode 100644
index 0000000..2b68ebe
--- /dev/null
+++ b/paramiko/ssh_exception.py
@@ -0,0 +1,250 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import socket
+
+
+class SSHException(Exception):
+ """
+ Exception raised by failures in SSH2 protocol negotiation or logic errors.
+ """
+
+ pass
+
+
+class AuthenticationException(SSHException):
+ """
+ Exception raised when authentication failed for some reason. It may be
+ possible to retry with different credentials. (Other classes specify more
+ specific reasons.)
+
+ .. versionadded:: 1.6
+ """
+
+ pass
+
+
+class PasswordRequiredException(AuthenticationException):
+ """
+ Exception raised when a password is needed to unlock a private key file.
+ """
+
+ pass
+
+
+class BadAuthenticationType(AuthenticationException):
+ """
+ Exception raised when an authentication type (like password) is used, but
+ the server isn't allowing that type. (It may only allow public-key, for
+ example.)
+
+ .. versionadded:: 1.1
+ """
+
+ allowed_types = []
+
+ # TODO 4.0: remove explanation kwarg
+ def __init__(self, explanation, types):
+ # TODO 4.0: remove this supercall unless it's actually required for
+ # pickling (after fixing pickling)
+ AuthenticationException.__init__(self, explanation, types)
+ self.explanation = explanation
+ self.allowed_types = types
+
+ def __str__(self):
+ return "{}; allowed types: {!r}".format(
+ self.explanation, self.allowed_types
+ )
+
+
+class PartialAuthentication(AuthenticationException):
+ """
+ An internal exception thrown in the case of partial authentication.
+ """
+
+ allowed_types = []
+
+ def __init__(self, types):
+ AuthenticationException.__init__(self, types)
+ self.allowed_types = types
+
+ def __str__(self):
+ return "Partial authentication; allowed types: {!r}".format(
+ self.allowed_types
+ )
+
+
+# TODO 4.0: stop inheriting from SSHException, move to auth.py
+class UnableToAuthenticate(AuthenticationException):
+ pass
+
+
+class ChannelException(SSHException):
+ """
+ Exception raised when an attempt to open a new `.Channel` fails.
+
+ :param int code: the error code returned by the server
+
+ .. versionadded:: 1.6
+ """
+
+ def __init__(self, code, text):
+ SSHException.__init__(self, code, text)
+ self.code = code
+ self.text = text
+
+ def __str__(self):
+ return "ChannelException({!r}, {!r})".format(self.code, self.text)
+
+
+class BadHostKeyException(SSHException):
+ """
+ The host key given by the SSH server did not match what we were expecting.
+
+ :param str hostname: the hostname of the SSH server
+ :param PKey got_key: the host key presented by the server
+ :param PKey expected_key: the host key expected
+
+ .. versionadded:: 1.6
+ """
+
+ def __init__(self, hostname, got_key, expected_key):
+ SSHException.__init__(self, hostname, got_key, expected_key)
+ self.hostname = hostname
+ self.key = got_key
+ self.expected_key = expected_key
+
+ def __str__(self):
+ msg = "Host key for server '{}' does not match: got '{}', expected '{}'" # noqa
+ return msg.format(
+ self.hostname,
+ self.key.get_base64(),
+ self.expected_key.get_base64(),
+ )
+
+
+class IncompatiblePeer(SSHException):
+ """
+ A disagreement arose regarding an algorithm required for key exchange.
+
+ .. versionadded:: 2.9
+ """
+
+ # TODO 4.0: consider making this annotate w/ 1..N 'missing' algorithms,
+ # either just the first one that would halt kex, or even updating the
+ # Transport logic so we record /all/ that /could/ halt kex.
+ # TODO: update docstrings where this may end up raised so they are more
+ # specific.
+ pass
+
+
+class ProxyCommandFailure(SSHException):
+ """
+ The "ProxyCommand" found in the .ssh/config file returned an error.
+
+ :param str command: The command line that is generating this exception.
+ :param str error: The error captured from the proxy command output.
+ """
+
+ def __init__(self, command, error):
+ SSHException.__init__(self, command, error)
+ self.command = command
+ self.error = error
+
+ def __str__(self):
+ return 'ProxyCommand("{}") returned nonzero exit status: {}'.format(
+ self.command, self.error
+ )
+
+
+class NoValidConnectionsError(socket.error):
+ """
+ Multiple connection attempts were made and no families succeeded.
+
+ This exception class wraps multiple "real" underlying connection errors,
+ all of which represent failed connection attempts. Because these errors are
+ not guaranteed to all be of the same error type (i.e. different errno,
+ `socket.error` subclass, message, etc) we expose a single unified error
+ message and a ``None`` errno so that instances of this class match most
+ normal handling of `socket.error` objects.
+
+ To see the wrapped exception objects, access the ``errors`` attribute.
+ ``errors`` is a dict whose keys are address tuples (e.g. ``('127.0.0.1',
+ 22)``) and whose values are the exception encountered trying to connect to
+ that address.
+
+ It is implied/assumed that all the errors given to a single instance of
+ this class are from connecting to the same hostname + port (and thus that
+ the differences are in the resolution of the hostname - e.g. IPv4 vs v6).
+
+ .. versionadded:: 1.16
+ """
+
+ def __init__(self, errors):
+ """
+ :param dict errors:
+ The errors dict to store, as described by class docstring.
+ """
+ addrs = sorted(errors.keys())
+ body = ", ".join([x[0] for x in addrs[:-1]])
+ tail = addrs[-1][0]
+ if body:
+ msg = "Unable to connect to port {0} on {1} or {2}"
+ else:
+ msg = "Unable to connect to port {0} on {2}"
+ super().__init__(
+ None, msg.format(addrs[0][1], body, tail) # stand-in for errno
+ )
+ self.errors = errors
+
+ def __reduce__(self):
+ return (self.__class__, (self.errors,))
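+
+
+# A small, purely illustrative helper (the name is hypothetical and not part
+# of the public API) showing how the ``errors`` mapping described above can
+# be flattened into readable per-address lines after catching this exception.
+def _describe_connection_errors(exc):
+    """Return one line per failed address of a `NoValidConnectionsError`."""
+    return [
+        "{} -> {!r}".format(addr, error)
+        for addr, error in sorted(exc.errors.items())
+    ]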
+
+
+class CouldNotCanonicalize(SSHException):
+ """
+ Raised when hostname canonicalization fails & fallback is disabled.
+
+ .. versionadded:: 2.7
+ """
+
+ pass
+
+
+class ConfigParseError(SSHException):
+ """
+ A fatal error was encountered trying to parse SSH config data.
+
+ Typically this means a config file violated the ``ssh_config``
+ specification in a manner that requires exiting immediately, such as not
+ matching ``key = value`` syntax or misusing certain ``Match`` keywords.
+
+ .. versionadded:: 2.7
+ """
+
+ pass
+
+
+class MessageOrderError(SSHException):
+ """
+ Out-of-order protocol messages were received, violating "strict kex" mode.
+
+ .. versionadded:: 3.4
+ """
+
+ pass
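+
+
+# An illustrative sketch (not part of this module's API) of how calling code
+# commonly distinguishes the exception classes defined above; the host and
+# credentials are placeholders, and the import is done lazily to avoid a
+# circular import at module load time.
+def _example_classify_connect_failure(host, username, password):
+    from paramiko.client import SSHClient, AutoAddPolicy
+
+    client = SSHClient()
+    client.set_missing_host_key_policy(AutoAddPolicy())
+    try:
+        client.connect(host, username=username, password=password)
+    except BadAuthenticationType as e:
+        return "server only allows: {}".format(e.allowed_types)
+    except AuthenticationException:
+        return "bad credentials"
+    except NoValidConnectionsError as e:
+        return "no usable address: {}".format(sorted(e.errors))
+    except (SSHException, socket.error) as e:
+        return "other failure: {!r}".format(e)
+    finally:
+        client.close()
+    return "connected"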
diff --git a/paramiko/ssh_gss.py b/paramiko/ssh_gss.py
new file mode 100644
index 0000000..ee49c34
--- /dev/null
+++ b/paramiko/ssh_gss.py
@@ -0,0 +1,778 @@
+# Copyright (C) 2013-2014 science + computing ag
+# Author: Sebastian Deiss <sebastian.deiss@t-online.de>
+#
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+"""
+This module provides GSS-API / SSPI authentication as defined in :rfc:`4462`.
+
+.. note:: Credential delegation is not supported in server mode.
+
+.. seealso:: :doc:`/api/kex_gss`
+
+.. versionadded:: 1.15
+"""
+
+import struct
+import os
+import sys
+
+
+#: A boolean constant that indicates if GSS-API / SSPI is available.
+GSS_AUTH_AVAILABLE = True
+
+
+#: A tuple of the exception types used by the underlying GSSAPI implementation.
+GSS_EXCEPTIONS = ()
+
+
+#: A string constant identifying which GSS-API / SSPI implementation is in use.
+_API = None
+
+try:
+ import gssapi
+
+ if hasattr(gssapi, "__title__") and gssapi.__title__ == "python-gssapi":
+ # old, unmaintained python-gssapi package
+ _API = "MIT" # keep this for compatibility
+ GSS_EXCEPTIONS = (gssapi.GSSException,)
+ else:
+ _API = "PYTHON-GSSAPI-NEW"
+ GSS_EXCEPTIONS = (
+ gssapi.exceptions.GeneralError,
+ gssapi.raw.misc.GSSError,
+ )
+except (ImportError, OSError):
+ try:
+ import pywintypes
+ import sspicon
+ import sspi
+
+ _API = "SSPI"
+ GSS_EXCEPTIONS = (pywintypes.error,)
+ except ImportError:
+ GSS_AUTH_AVAILABLE = False
+ _API = None
+
+from paramiko.common import MSG_USERAUTH_REQUEST
+from paramiko.ssh_exception import SSHException
+from paramiko._version import __version_info__
+
+
+def GSSAuth(auth_method, gss_deleg_creds=True):
+ """
+ Provide SSH2 GSS-API / SSPI authentication.
+
+ :param str auth_method: The name of the SSH authentication mechanism
+                             (gssapi-with-mic or gssapi-keyex)
+ :param bool gss_deleg_creds: Delegate client credentials or not.
+ We delegate credentials by default.
+ :return: Either an `._SSH_GSSAPI_OLD` or `._SSH_GSSAPI_NEW` (Unix)
+ object or an `_SSH_SSPI` (Windows) object
+ :rtype: object
+
+ :raises: ``ImportError`` -- If no GSS-API / SSPI module could be imported.
+
+ :see: `RFC 4462 <http://www.ietf.org/rfc/rfc4462.txt>`_
+ :note: Check for the available API and return either an `._SSH_GSSAPI_OLD`
+ (MIT GSSAPI using python-gssapi package) object, an
+ `._SSH_GSSAPI_NEW` (MIT GSSAPI using gssapi package) object
+ or an `._SSH_SSPI` (MS SSPI) object.
+           If there is no supported API available, an ``ImportError`` is
+           raised instead.
+ """
+ if _API == "MIT":
+ return _SSH_GSSAPI_OLD(auth_method, gss_deleg_creds)
+ elif _API == "PYTHON-GSSAPI-NEW":
+ return _SSH_GSSAPI_NEW(auth_method, gss_deleg_creds)
+ elif _API == "SSPI" and os.name == "nt":
+ return _SSH_SSPI(auth_method, gss_deleg_creds)
+ else:
+ raise ImportError("Unable to import a GSS-API / SSPI module!")
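+
+
+# The function below is only an illustrative sketch of the client-side call
+# sequence the classes in this module implement; its name and arguments are
+# placeholders, and in practice paramiko's transport/auth machinery drives
+# these calls (including the token-exchange loop elided here).
+def _example_gss_client_flow(target_host, username, session_id):
+    auth = GSSAuth("gssapi-with-mic", gss_deleg_creds=True)
+    # First round: there is no token from the server yet.
+    token = auth.ssh_init_sec_context(target_host, username=username)
+    # ... send ``token`` to the server, feed each reply back in through
+    # ssh_init_sec_context(recv_token=...) until the context is established,
+    # then sign the userauth request with a MIC over the session ID message:
+    mic = auth.ssh_get_mic(session_id)
+    return token, mic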
+
+
+class _SSH_GSSAuth:
+ """
+ Contains the shared variables and methods of `._SSH_GSSAPI_OLD`,
+ `._SSH_GSSAPI_NEW` and `._SSH_SSPI`.
+ """
+
+ def __init__(self, auth_method, gss_deleg_creds):
+ """
+ :param str auth_method: The name of the SSH authentication mechanism
+                                (gssapi-with-mic or gssapi-keyex)
+ :param bool gss_deleg_creds: Delegate client credentials or not
+ """
+ self._auth_method = auth_method
+ self._gss_deleg_creds = gss_deleg_creds
+ self._gss_host = None
+ self._username = None
+ self._session_id = None
+ self._service = "ssh-connection"
+ """
+ OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication,
+ so we also support the krb5 mechanism only.
+ """
+ self._krb5_mech = "1.2.840.113554.1.2.2"
+
+ # client mode
+ self._gss_ctxt = None
+ self._gss_ctxt_status = False
+
+ # server mode
+ self._gss_srv_ctxt = None
+ self._gss_srv_ctxt_status = False
+ self.cc_file = None
+
+ def set_service(self, service):
+ """
+        Set a non-default service. This setter exists because RFC 4462
+        doesn't specify "ssh-connection" as the only valid service value.
+
+ :param str service: The desired SSH service
+ """
+ if service.find("ssh-"):
+ self._service = service
+
+ def set_username(self, username):
+ """
+        Setter for ``username``. If GSS-API Key Exchange is performed, the
+        username is not set by ``ssh_init_sec_context``.
+
+ :param str username: The name of the user who attempts to login
+ """
+ self._username = username
+
+ def ssh_gss_oids(self, mode="client"):
+ """
+ This method returns a single OID, because we only support the
+ Kerberos V5 mechanism.
+
+        :param str mode: "client" for client mode, "server" for server mode
+ :return: A byte sequence containing the number of supported
+ OIDs, the length of the OID and the actual OID encoded with
+ DER
+ :note: In server mode we just return the OID length and the DER encoded
+ OID.
+ """
+ from pyasn1.type.univ import ObjectIdentifier
+ from pyasn1.codec.der import encoder
+
+ OIDs = self._make_uint32(1)
+ krb5_OID = encoder.encode(ObjectIdentifier(self._krb5_mech))
+ OID_len = self._make_uint32(len(krb5_OID))
+ if mode == "server":
+ return OID_len + krb5_OID
+ return OIDs + OID_len + krb5_OID
+
+ def ssh_check_mech(self, desired_mech):
+ """
+ Check if the given OID is the Kerberos V5 OID (server mode).
+
+ :param str desired_mech: The desired GSS-API mechanism of the client
+        :return: ``True`` if the given OID is supported, otherwise ``False``
+ """
+ from pyasn1.codec.der import decoder
+
+ mech, __ = decoder.decode(desired_mech)
+ if mech.__str__() != self._krb5_mech:
+ return False
+ return True
+
+ # Internals
+ # -------------------------------------------------------------------------
+ def _make_uint32(self, integer):
+ """
+        Create the byte sequence of a 32-bit unsigned integer.
+
+ :param int integer: The integer value to convert
+        :return: The byte sequence of a 32-bit unsigned integer
+ """
+ return struct.pack("!I", integer)
+
+ def _ssh_build_mic(self, session_id, username, service, auth_method):
+ """
+        Create the SSH2 MIC field for gssapi-with-mic.
+
+ :param str session_id: The SSH session ID
+ :param str username: The name of the user who attempts to login
+ :param str service: The requested SSH service
+ :param str auth_method: The requested SSH authentication mechanism
+ :return: The MIC as defined in RFC 4462. The contents of the
+ MIC field are:
+ string session_identifier,
+ byte SSH_MSG_USERAUTH_REQUEST,
+ string user-name,
+ string service (ssh-connection),
+ string authentication-method
+ (gssapi-with-mic or gssapi-keyex)
+ """
+ mic = self._make_uint32(len(session_id))
+ mic += session_id
+ mic += struct.pack("B", MSG_USERAUTH_REQUEST)
+ mic += self._make_uint32(len(username))
+ mic += username.encode()
+ mic += self._make_uint32(len(service))
+ mic += service.encode()
+ mic += self._make_uint32(len(auth_method))
+ mic += auth_method.encode()
+ return mic
+
+
+class _SSH_GSSAPI_OLD(_SSH_GSSAuth):
+ """
+ Implementation of the GSS-API MIT Kerberos Authentication for SSH2,
+ using the older (unmaintained) python-gssapi package.
+
+ :see: `.GSSAuth`
+ """
+
+ def __init__(self, auth_method, gss_deleg_creds):
+ """
+ :param str auth_method: The name of the SSH authentication mechanism
+                                (gssapi-with-mic or gssapi-keyex)
+ :param bool gss_deleg_creds: Delegate client credentials or not
+ """
+ _SSH_GSSAuth.__init__(self, auth_method, gss_deleg_creds)
+
+ if self._gss_deleg_creds:
+ self._gss_flags = (
+ gssapi.C_PROT_READY_FLAG,
+ gssapi.C_INTEG_FLAG,
+ gssapi.C_MUTUAL_FLAG,
+ gssapi.C_DELEG_FLAG,
+ )
+ else:
+ self._gss_flags = (
+ gssapi.C_PROT_READY_FLAG,
+ gssapi.C_INTEG_FLAG,
+ gssapi.C_MUTUAL_FLAG,
+ )
+
+ def ssh_init_sec_context(
+ self, target, desired_mech=None, username=None, recv_token=None
+ ):
+ """
+ Initialize a GSS-API context.
+
+ :param str username: The name of the user who attempts to login
+ :param str target: The hostname of the target to connect to
+ :param str desired_mech: The negotiated GSS-API mechanism
+ ("pseudo negotiated" mechanism, because we
+ support just the krb5 mechanism :-))
+ :param str recv_token: The GSS-API token received from the Server
+ :raises:
+ `.SSHException` -- Is raised if the desired mechanism of the client
+ is not supported
+ :return: A ``String`` if the GSS-API has returned a token or
+ ``None`` if no token was returned
+ """
+ from pyasn1.codec.der import decoder
+
+ self._username = username
+ self._gss_host = target
+ targ_name = gssapi.Name(
+ "host@" + self._gss_host, gssapi.C_NT_HOSTBASED_SERVICE
+ )
+ ctx = gssapi.Context()
+ ctx.flags = self._gss_flags
+ if desired_mech is None:
+ krb5_mech = gssapi.OID.mech_from_string(self._krb5_mech)
+ else:
+ mech, __ = decoder.decode(desired_mech)
+ if mech.__str__() != self._krb5_mech:
+ raise SSHException("Unsupported mechanism OID.")
+ else:
+ krb5_mech = gssapi.OID.mech_from_string(self._krb5_mech)
+ token = None
+ try:
+ if recv_token is None:
+ self._gss_ctxt = gssapi.InitContext(
+ peer_name=targ_name,
+ mech_type=krb5_mech,
+ req_flags=ctx.flags,
+ )
+ token = self._gss_ctxt.step(token)
+ else:
+ token = self._gss_ctxt.step(recv_token)
+ except gssapi.GSSException:
+ message = "{} Target: {}".format(sys.exc_info()[1], self._gss_host)
+ raise gssapi.GSSException(message)
+ self._gss_ctxt_status = self._gss_ctxt.established
+ return token
+
+ def ssh_get_mic(self, session_id, gss_kex=False):
+ """
+ Create the MIC token for a SSH2 message.
+
+ :param str session_id: The SSH session ID
+ :param bool gss_kex: Generate the MIC for GSS-API Key Exchange or not
+ :return: gssapi-with-mic:
+ Returns the MIC token from GSS-API for the message we created
+ with ``_ssh_build_mic``.
+ gssapi-keyex:
+ Returns the MIC token from GSS-API with the SSH session ID as
+ message.
+ """
+ self._session_id = session_id
+ if not gss_kex:
+ mic_field = self._ssh_build_mic(
+ self._session_id,
+ self._username,
+ self._service,
+ self._auth_method,
+ )
+ mic_token = self._gss_ctxt.get_mic(mic_field)
+ else:
+ # for key exchange with gssapi-keyex
+ mic_token = self._gss_srv_ctxt.get_mic(self._session_id)
+ return mic_token
+
+ def ssh_accept_sec_context(self, hostname, recv_token, username=None):
+ """
+ Accept a GSS-API context (server mode).
+
+        :param str hostname: The server's hostname
+ :param str username: The name of the user who attempts to login
+        :param str recv_token: The GSS-API token received from the client,
+            if it's not the initial call.
+ :return: A ``String`` if the GSS-API has returned a token or ``None``
+ if no token was returned
+ """
+        # hostname and username are not required for GSSAPI, but they are
+        # required for SSPI
+ self._gss_host = hostname
+ self._username = username
+ if self._gss_srv_ctxt is None:
+ self._gss_srv_ctxt = gssapi.AcceptContext()
+ token = self._gss_srv_ctxt.step(recv_token)
+ self._gss_srv_ctxt_status = self._gss_srv_ctxt.established
+ return token
+
+ def ssh_check_mic(self, mic_token, session_id, username=None):
+ """
+ Verify the MIC token for a SSH2 message.
+
+ :param str mic_token: The MIC token received from the client
+ :param str session_id: The SSH session ID
+ :param str username: The name of the user who attempts to login
+ :return: None if the MIC check was successful
+ :raises: ``gssapi.GSSException`` -- if the MIC check failed
+ """
+ self._session_id = session_id
+ self._username = username
+ if self._username is not None:
+ # server mode
+ mic_field = self._ssh_build_mic(
+ self._session_id,
+ self._username,
+ self._service,
+ self._auth_method,
+ )
+ self._gss_srv_ctxt.verify_mic(mic_field, mic_token)
+ else:
+ # for key exchange with gssapi-keyex
+ # client mode
+ self._gss_ctxt.verify_mic(self._session_id, mic_token)
+
+ @property
+ def credentials_delegated(self):
+ """
+ Checks if credentials are delegated (server mode).
+
+ :return: ``True`` if credentials are delegated, otherwise ``False``
+ """
+ if self._gss_srv_ctxt.delegated_cred is not None:
+ return True
+ return False
+
+ def save_client_creds(self, client_token):
+ """
+ Save the Client token in a file. This is used by the SSH server
+ to store the client credentials if credentials are delegated
+ (server mode).
+
+        :param str client_token: The GSS-API token received from the client
+ :raises:
+ ``NotImplementedError`` -- Credential delegation is currently not
+ supported in server mode
+ """
+ raise NotImplementedError
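+
+
+# An illustrative sketch of the matching server-side exchange (the function
+# name and arguments are placeholders; paramiko's Transport normally drives
+# this). In the real protocol the steps happen in separate rounds: tokens are
+# exchanged until the context is established, and only then is a MIC received
+# and verified.
+def _example_gss_server_step(
+    hostname, username, client_token, session_id, mic
+):
+    srv = GSSAuth("gssapi-with-mic", gss_deleg_creds=False)
+    reply = srv.ssh_accept_sec_context(
+        hostname, recv_token=client_token, username=username
+    )
+    # Raises the backend's error type if the MIC does not verify.
+    srv.ssh_check_mic(mic, session_id, username)
+    return reply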
+
+
+if __version_info__ < (2, 5):
+ # provide the old name for strict backward compatibility
+ _SSH_GSSAPI = _SSH_GSSAPI_OLD
+
+
+class _SSH_GSSAPI_NEW(_SSH_GSSAuth):
+ """
+ Implementation of the GSS-API MIT Kerberos Authentication for SSH2,
+ using the newer, currently maintained gssapi package.
+
+ :see: `.GSSAuth`
+ """
+
+ def __init__(self, auth_method, gss_deleg_creds):
+ """
+ :param str auth_method: The name of the SSH authentication mechanism
+                                (gssapi-with-mic or gssapi-keyex)
+ :param bool gss_deleg_creds: Delegate client credentials or not
+ """
+ _SSH_GSSAuth.__init__(self, auth_method, gss_deleg_creds)
+
+ if self._gss_deleg_creds:
+ self._gss_flags = (
+ gssapi.RequirementFlag.protection_ready,
+ gssapi.RequirementFlag.integrity,
+ gssapi.RequirementFlag.mutual_authentication,
+ gssapi.RequirementFlag.delegate_to_peer,
+ )
+ else:
+ self._gss_flags = (
+ gssapi.RequirementFlag.protection_ready,
+ gssapi.RequirementFlag.integrity,
+ gssapi.RequirementFlag.mutual_authentication,
+ )
+
+ def ssh_init_sec_context(
+ self, target, desired_mech=None, username=None, recv_token=None
+ ):
+ """
+ Initialize a GSS-API context.
+
+ :param str username: The name of the user who attempts to login
+ :param str target: The hostname of the target to connect to
+ :param str desired_mech: The negotiated GSS-API mechanism
+ ("pseudo negotiated" mechanism, because we
+ support just the krb5 mechanism :-))
+ :param str recv_token: The GSS-API token received from the Server
+ :raises: `.SSHException` -- Is raised if the desired mechanism of the
+ client is not supported
+ :raises: ``gssapi.exceptions.GSSError`` if there is an error signaled
+ by the GSS-API implementation
+ :return: A ``String`` if the GSS-API has returned a token or ``None``
+ if no token was returned
+ """
+ from pyasn1.codec.der import decoder
+
+ self._username = username
+ self._gss_host = target
+ targ_name = gssapi.Name(
+ "host@" + self._gss_host,
+ name_type=gssapi.NameType.hostbased_service,
+ )
+ if desired_mech is not None:
+ mech, __ = decoder.decode(desired_mech)
+ if mech.__str__() != self._krb5_mech:
+ raise SSHException("Unsupported mechanism OID.")
+ krb5_mech = gssapi.MechType.kerberos
+ token = None
+ if recv_token is None:
+ self._gss_ctxt = gssapi.SecurityContext(
+ name=targ_name,
+ flags=self._gss_flags,
+ mech=krb5_mech,
+ usage="initiate",
+ )
+ token = self._gss_ctxt.step(token)
+ else:
+ token = self._gss_ctxt.step(recv_token)
+ self._gss_ctxt_status = self._gss_ctxt.complete
+ return token
+
+ def ssh_get_mic(self, session_id, gss_kex=False):
+ """
+ Create the MIC token for a SSH2 message.
+
+ :param str session_id: The SSH session ID
+ :param bool gss_kex: Generate the MIC for GSS-API Key Exchange or not
+ :return: gssapi-with-mic:
+ Returns the MIC token from GSS-API for the message we created
+ with ``_ssh_build_mic``.
+ gssapi-keyex:
+ Returns the MIC token from GSS-API with the SSH session ID as
+ message.
+ :rtype: str
+ """
+ self._session_id = session_id
+ if not gss_kex:
+ mic_field = self._ssh_build_mic(
+ self._session_id,
+ self._username,
+ self._service,
+ self._auth_method,
+ )
+ mic_token = self._gss_ctxt.get_signature(mic_field)
+ else:
+ # for key exchange with gssapi-keyex
+ mic_token = self._gss_srv_ctxt.get_signature(self._session_id)
+ return mic_token
+
+ def ssh_accept_sec_context(self, hostname, recv_token, username=None):
+ """
+ Accept a GSS-API context (server mode).
+
+        :param str hostname: The server's hostname
+ :param str username: The name of the user who attempts to login
+        :param str recv_token: The GSS-API token received from the client,
+            if it's not the initial call.
+ :return: A ``String`` if the GSS-API has returned a token or ``None``
+ if no token was returned
+ """
+        # hostname and username are not required for GSSAPI, but they are
+        # required for SSPI
+ self._gss_host = hostname
+ self._username = username
+ if self._gss_srv_ctxt is None:
+ self._gss_srv_ctxt = gssapi.SecurityContext(usage="accept")
+ token = self._gss_srv_ctxt.step(recv_token)
+ self._gss_srv_ctxt_status = self._gss_srv_ctxt.complete
+ return token
+
+ def ssh_check_mic(self, mic_token, session_id, username=None):
+ """
+ Verify the MIC token for a SSH2 message.
+
+ :param str mic_token: The MIC token received from the client
+ :param str session_id: The SSH session ID
+ :param str username: The name of the user who attempts to login
+ :return: None if the MIC check was successful
+ :raises: ``gssapi.exceptions.GSSError`` -- if the MIC check failed
+ """
+ self._session_id = session_id
+ self._username = username
+ if self._username is not None:
+ # server mode
+ mic_field = self._ssh_build_mic(
+ self._session_id,
+ self._username,
+ self._service,
+ self._auth_method,
+ )
+ self._gss_srv_ctxt.verify_signature(mic_field, mic_token)
+ else:
+ # for key exchange with gssapi-keyex
+ # client mode
+ self._gss_ctxt.verify_signature(self._session_id, mic_token)
+
+ @property
+ def credentials_delegated(self):
+ """
+ Checks if credentials are delegated (server mode).
+
+ :return: ``True`` if credentials are delegated, otherwise ``False``
+ :rtype: bool
+ """
+ if self._gss_srv_ctxt.delegated_creds is not None:
+ return True
+ return False
+
+ def save_client_creds(self, client_token):
+ """
+ Save the Client token in a file. This is used by the SSH server
+ to store the client credentials if credentials are delegated
+ (server mode).
+
+        :param str client_token: The GSS-API token received from the client
+ :raises: ``NotImplementedError`` -- Credential delegation is currently
+ not supported in server mode
+ """
+ raise NotImplementedError
+
+
+class _SSH_SSPI(_SSH_GSSAuth):
+ """
+ Implementation of the Microsoft SSPI Kerberos Authentication for SSH2.
+
+ :see: `.GSSAuth`
+ """
+
+ def __init__(self, auth_method, gss_deleg_creds):
+ """
+ :param str auth_method: The name of the SSH authentication mechanism
+                                (gssapi-with-mic or gssapi-keyex)
+ :param bool gss_deleg_creds: Delegate client credentials or not
+ """
+ _SSH_GSSAuth.__init__(self, auth_method, gss_deleg_creds)
+
+ if self._gss_deleg_creds:
+ self._gss_flags = (
+ sspicon.ISC_REQ_INTEGRITY
+ | sspicon.ISC_REQ_MUTUAL_AUTH
+ | sspicon.ISC_REQ_DELEGATE
+ )
+ else:
+ self._gss_flags = (
+ sspicon.ISC_REQ_INTEGRITY | sspicon.ISC_REQ_MUTUAL_AUTH
+ )
+
+ def ssh_init_sec_context(
+ self, target, desired_mech=None, username=None, recv_token=None
+ ):
+ """
+ Initialize a SSPI context.
+
+ :param str username: The name of the user who attempts to login
+ :param str target: The FQDN of the target to connect to
+ :param str desired_mech: The negotiated SSPI mechanism
+ ("pseudo negotiated" mechanism, because we
+ support just the krb5 mechanism :-))
+ :param recv_token: The SSPI token received from the Server
+ :raises:
+ `.SSHException` -- Is raised if the desired mechanism of the client
+ is not supported
+ :return: A ``String`` if the SSPI has returned a token or ``None`` if
+ no token was returned
+ """
+ from pyasn1.codec.der import decoder
+
+ self._username = username
+ self._gss_host = target
+ error = 0
+ targ_name = "host/" + self._gss_host
+ if desired_mech is not None:
+ mech, __ = decoder.decode(desired_mech)
+ if mech.__str__() != self._krb5_mech:
+ raise SSHException("Unsupported mechanism OID.")
+ try:
+ if recv_token is None:
+ self._gss_ctxt = sspi.ClientAuth(
+ "Kerberos", scflags=self._gss_flags, targetspn=targ_name
+ )
+ error, token = self._gss_ctxt.authorize(recv_token)
+ token = token[0].Buffer
+ except pywintypes.error as e:
+ e.strerror += ", Target: {}".format(self._gss_host)
+ raise
+
+ if error == 0:
+ """
+ if the status is GSS_COMPLETE (error = 0) the context is fully
+ established an we can set _gss_ctxt_status to True.
+ """
+ self._gss_ctxt_status = True
+ token = None
+ """
+ You won't get another token if the context is fully established,
+ so i set token to None instead of ""
+ """
+ return token
+
+ def ssh_get_mic(self, session_id, gss_kex=False):
+ """
+ Create the MIC token for a SSH2 message.
+
+ :param str session_id: The SSH session ID
+ :param bool gss_kex: Generate the MIC for Key Exchange with SSPI or not
+ :return: gssapi-with-mic:
+ Returns the MIC token from SSPI for the message we created
+ with ``_ssh_build_mic``.
+ gssapi-keyex:
+ Returns the MIC token from SSPI with the SSH session ID as
+ message.
+ """
+ self._session_id = session_id
+ if not gss_kex:
+ mic_field = self._ssh_build_mic(
+ self._session_id,
+ self._username,
+ self._service,
+ self._auth_method,
+ )
+ mic_token = self._gss_ctxt.sign(mic_field)
+ else:
+ # for key exchange with gssapi-keyex
+ mic_token = self._gss_srv_ctxt.sign(self._session_id)
+ return mic_token
+
+ def ssh_accept_sec_context(self, hostname, username, recv_token):
+ """
+ Accept a SSPI context (server mode).
+
+        :param str hostname: The server's FQDN
+ :param str username: The name of the user who attempts to login
+        :param str recv_token: The SSPI token received from the client,
+            if it's not the initial call.
+ :return: A ``String`` if the SSPI has returned a token or ``None`` if
+ no token was returned
+ """
+ self._gss_host = hostname
+ self._username = username
+ targ_name = "host/" + self._gss_host
+ self._gss_srv_ctxt = sspi.ServerAuth("Kerberos", spn=targ_name)
+ error, token = self._gss_srv_ctxt.authorize(recv_token)
+ token = token[0].Buffer
+ if error == 0:
+ self._gss_srv_ctxt_status = True
+ token = None
+ return token
+
+ def ssh_check_mic(self, mic_token, session_id, username=None):
+ """
+ Verify the MIC token for a SSH2 message.
+
+ :param str mic_token: The MIC token received from the client
+ :param str session_id: The SSH session ID
+ :param str username: The name of the user who attempts to login
+ :return: None if the MIC check was successful
+ :raises: ``sspi.error`` -- if the MIC check failed
+ """
+ self._session_id = session_id
+ self._username = username
+ if username is not None:
+ # server mode
+ mic_field = self._ssh_build_mic(
+ self._session_id,
+ self._username,
+ self._service,
+ self._auth_method,
+ )
+ # Verifies data and its signature. If verification fails, an
+ # sspi.error will be raised.
+ self._gss_srv_ctxt.verify(mic_field, mic_token)
+ else:
+ # for key exchange with gssapi-keyex
+ # client mode
+ # Verifies data and its signature. If verification fails, an
+ # sspi.error will be raised.
+ self._gss_ctxt.verify(self._session_id, mic_token)
+
+ @property
+ def credentials_delegated(self):
+ """
+ Checks if credentials are delegated (server mode).
+
+ :return: ``True`` if credentials are delegated, otherwise ``False``
+ """
+ return self._gss_flags & sspicon.ISC_REQ_DELEGATE and (
+ self._gss_srv_ctxt_status or self._gss_flags
+ )
+
+ def save_client_creds(self, client_token):
+ """
+ Save the Client token in a file. This is used by the SSH server
+        to store the client credentials if credentials are delegated
+ (server mode).
+
+        :param str client_token: The SSPI token received from the client
+ :raises:
+ ``NotImplementedError`` -- Credential delegation is currently not
+ supported in server mode
+ """
+ raise NotImplementedError
diff --git a/paramiko/transport.py b/paramiko/transport.py
new file mode 100644
index 0000000..8301917
--- /dev/null
+++ b/paramiko/transport.py
@@ -0,0 +1,3389 @@
+# Copyright (C) 2003-2007  Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Core protocol implementation
+"""
+
+import os
+import socket
+import sys
+import threading
+import time
+import weakref
+from hashlib import md5, sha1, sha256, sha512
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.ciphers import algorithms, Cipher, modes
+
+import paramiko
+from paramiko import util
+from paramiko.auth_handler import AuthHandler, AuthOnlyHandler
+from paramiko.ssh_gss import GSSAuth
+from paramiko.channel import Channel
+from paramiko.common import (
+ xffffffff,
+ cMSG_CHANNEL_OPEN,
+ cMSG_IGNORE,
+ cMSG_GLOBAL_REQUEST,
+ DEBUG,
+ MSG_KEXINIT,
+ MSG_IGNORE,
+ MSG_DISCONNECT,
+ MSG_DEBUG,
+ ERROR,
+ WARNING,
+ cMSG_UNIMPLEMENTED,
+ INFO,
+ cMSG_KEXINIT,
+ cMSG_NEWKEYS,
+ MSG_NEWKEYS,
+ cMSG_REQUEST_SUCCESS,
+ cMSG_REQUEST_FAILURE,
+ CONNECTION_FAILED_CODE,
+ OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
+ OPEN_SUCCEEDED,
+ cMSG_CHANNEL_OPEN_FAILURE,
+ cMSG_CHANNEL_OPEN_SUCCESS,
+ MSG_GLOBAL_REQUEST,
+ MSG_REQUEST_SUCCESS,
+ MSG_REQUEST_FAILURE,
+ cMSG_SERVICE_REQUEST,
+ MSG_SERVICE_ACCEPT,
+ MSG_CHANNEL_OPEN_SUCCESS,
+ MSG_CHANNEL_OPEN_FAILURE,
+ MSG_CHANNEL_OPEN,
+ MSG_CHANNEL_SUCCESS,
+ MSG_CHANNEL_FAILURE,
+ MSG_CHANNEL_DATA,
+ MSG_CHANNEL_EXTENDED_DATA,
+ MSG_CHANNEL_WINDOW_ADJUST,
+ MSG_CHANNEL_REQUEST,
+ MSG_CHANNEL_EOF,
+ MSG_CHANNEL_CLOSE,
+ MIN_WINDOW_SIZE,
+ MIN_PACKET_SIZE,
+ MAX_WINDOW_SIZE,
+ DEFAULT_WINDOW_SIZE,
+ DEFAULT_MAX_PACKET_SIZE,
+ HIGHEST_USERAUTH_MESSAGE_ID,
+ MSG_UNIMPLEMENTED,
+ MSG_NAMES,
+ MSG_EXT_INFO,
+ cMSG_EXT_INFO,
+ byte_ord,
+)
+from paramiko.compress import ZlibCompressor, ZlibDecompressor
+from paramiko.dsskey import DSSKey
+from paramiko.ed25519key import Ed25519Key
+from paramiko.kex_curve25519 import KexCurve25519
+from paramiko.kex_gex import KexGex, KexGexSHA256
+from paramiko.kex_group1 import KexGroup1
+from paramiko.kex_group14 import KexGroup14, KexGroup14SHA256
+from paramiko.kex_group16 import KexGroup16SHA512
+from paramiko.kex_ecdh_nist import KexNistp256, KexNistp384, KexNistp521
+from paramiko.kex_gss import KexGSSGex, KexGSSGroup1, KexGSSGroup14
+from paramiko.message import Message
+from paramiko.packet import Packetizer, NeedRekeyException
+from paramiko.primes import ModulusPack
+from paramiko.rsakey import RSAKey
+from paramiko.ecdsakey import ECDSAKey
+from paramiko.server import ServerInterface
+from paramiko.sftp_client import SFTPClient
+from paramiko.ssh_exception import (
+ BadAuthenticationType,
+ ChannelException,
+ IncompatiblePeer,
+ MessageOrderError,
+ ProxyCommandFailure,
+ SSHException,
+)
+from paramiko.util import (
+ ClosingContextManager,
+ clamp_value,
+ b,
+)
+
+
+# for thread cleanup
+_active_threads = []
+
+
+def _join_lingering_threads():
+ for thr in _active_threads:
+ thr.stop_thread()
+
+
+import atexit
+
+atexit.register(_join_lingering_threads)
+
+
+class Transport(threading.Thread, ClosingContextManager):
+ """
+ An SSH Transport attaches to a stream (usually a socket), negotiates an
+ encrypted session, authenticates, and then creates stream tunnels, called
+ `channels <.Channel>`, across the session. Multiple channels can be
+ multiplexed across a single session (and often are, in the case of port
+ forwardings).
+
+ Instances of this class may be used as context managers.
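+
+    A minimal, illustrative client flow (the host name and private key below
+    are placeholders)::
+
+        with Transport(("ssh.example.com", 22)) as t:
+            t.connect(username="user", pkey=my_private_key)
+            sftp = SFTPClient.from_transport(t)
+            sftp.listdir(".")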
+ """
+
+ _ENCRYPT = object()
+ _DECRYPT = object()
+
+ _PROTO_ID = "2.0"
+ _CLIENT_ID = "paramiko_{}".format(paramiko.__version__)
+
+ # These tuples of algorithm identifiers are in preference order; do not
+ # reorder without reason!
+ # NOTE: if you need to modify these, we suggest leveraging the
+ # `disabled_algorithms` constructor argument (also available in SSHClient)
+ # instead of monkeypatching or subclassing.
+ _preferred_ciphers = (
+ "aes128-ctr",
+ "aes192-ctr",
+ "aes256-ctr",
+ "aes128-cbc",
+ "aes192-cbc",
+ "aes256-cbc",
+ "3des-cbc",
+ )
+ _preferred_macs = (
+ "hmac-sha2-256",
+ "hmac-sha2-512",
+ "hmac-sha2-256-etm@openssh.com",
+ "hmac-sha2-512-etm@openssh.com",
+ "hmac-sha1",
+ "hmac-md5",
+ "hmac-sha1-96",
+ "hmac-md5-96",
+ )
+ # ~= HostKeyAlgorithms in OpenSSH land
+ _preferred_keys = (
+ "ssh-ed25519",
+ "ecdsa-sha2-nistp256",
+ "ecdsa-sha2-nistp384",
+ "ecdsa-sha2-nistp521",
+ "rsa-sha2-512",
+ "rsa-sha2-256",
+ "ssh-rsa",
+ "ssh-dss",
+ )
+ # ~= PubKeyAcceptedAlgorithms
+ _preferred_pubkeys = (
+ "ssh-ed25519",
+ "ecdsa-sha2-nistp256",
+ "ecdsa-sha2-nistp384",
+ "ecdsa-sha2-nistp521",
+ "rsa-sha2-512",
+ "rsa-sha2-256",
+ "ssh-rsa",
+ "ssh-dss",
+ )
+ _preferred_kex = (
+ "ecdh-sha2-nistp256",
+ "ecdh-sha2-nistp384",
+ "ecdh-sha2-nistp521",
+ "diffie-hellman-group16-sha512",
+ "diffie-hellman-group-exchange-sha256",
+ "diffie-hellman-group14-sha256",
+ "diffie-hellman-group-exchange-sha1",
+ "diffie-hellman-group14-sha1",
+ "diffie-hellman-group1-sha1",
+ )
+ if KexCurve25519.is_available():
+ _preferred_kex = ("curve25519-sha256@libssh.org",) + _preferred_kex
+ _preferred_gsskex = (
+ "gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==",
+ "gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==",
+ "gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==",
+ )
+ _preferred_compression = ("none",)
+
+ _cipher_info = {
+ "aes128-ctr": {
+ "class": algorithms.AES,
+ "mode": modes.CTR,
+ "block-size": 16,
+ "key-size": 16,
+ },
+ "aes192-ctr": {
+ "class": algorithms.AES,
+ "mode": modes.CTR,
+ "block-size": 16,
+ "key-size": 24,
+ },
+ "aes256-ctr": {
+ "class": algorithms.AES,
+ "mode": modes.CTR,
+ "block-size": 16,
+ "key-size": 32,
+ },
+ "aes128-cbc": {
+ "class": algorithms.AES,
+ "mode": modes.CBC,
+ "block-size": 16,
+ "key-size": 16,
+ },
+ "aes192-cbc": {
+ "class": algorithms.AES,
+ "mode": modes.CBC,
+ "block-size": 16,
+ "key-size": 24,
+ },
+ "aes256-cbc": {
+ "class": algorithms.AES,
+ "mode": modes.CBC,
+ "block-size": 16,
+ "key-size": 32,
+ },
+ "3des-cbc": {
+ "class": algorithms.TripleDES,
+ "mode": modes.CBC,
+ "block-size": 8,
+ "key-size": 24,
+ },
+ }
+
+ _mac_info = {
+ "hmac-sha1": {"class": sha1, "size": 20},
+ "hmac-sha1-96": {"class": sha1, "size": 12},
+ "hmac-sha2-256": {"class": sha256, "size": 32},
+ "hmac-sha2-256-etm@openssh.com": {"class": sha256, "size": 32},
+ "hmac-sha2-512": {"class": sha512, "size": 64},
+ "hmac-sha2-512-etm@openssh.com": {"class": sha512, "size": 64},
+ "hmac-md5": {"class": md5, "size": 16},
+ "hmac-md5-96": {"class": md5, "size": 12},
+ }
+
+ _key_info = {
+ # TODO: at some point we will want to drop this as it's no longer
+ # considered secure due to using SHA-1 for signatures. OpenSSH 8.8 no
+ # longer supports it. Question becomes at what point do we want to
+ # prevent users with older setups from using this?
+ "ssh-rsa": RSAKey,
+ "ssh-rsa-cert-v01@openssh.com": RSAKey,
+ "rsa-sha2-256": RSAKey,
+ "rsa-sha2-256-cert-v01@openssh.com": RSAKey,
+ "rsa-sha2-512": RSAKey,
+ "rsa-sha2-512-cert-v01@openssh.com": RSAKey,
+ "ssh-dss": DSSKey,
+ "ssh-dss-cert-v01@openssh.com": DSSKey,
+ "ecdsa-sha2-nistp256": ECDSAKey,
+ "ecdsa-sha2-nistp256-cert-v01@openssh.com": ECDSAKey,
+ "ecdsa-sha2-nistp384": ECDSAKey,
+ "ecdsa-sha2-nistp384-cert-v01@openssh.com": ECDSAKey,
+ "ecdsa-sha2-nistp521": ECDSAKey,
+ "ecdsa-sha2-nistp521-cert-v01@openssh.com": ECDSAKey,
+ "ssh-ed25519": Ed25519Key,
+ "ssh-ed25519-cert-v01@openssh.com": Ed25519Key,
+ }
+
+ _kex_info = {
+ "diffie-hellman-group1-sha1": KexGroup1,
+ "diffie-hellman-group14-sha1": KexGroup14,
+ "diffie-hellman-group-exchange-sha1": KexGex,
+ "diffie-hellman-group-exchange-sha256": KexGexSHA256,
+ "diffie-hellman-group14-sha256": KexGroup14SHA256,
+ "diffie-hellman-group16-sha512": KexGroup16SHA512,
+ "gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGroup1,
+ "gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGroup14,
+ "gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==": KexGSSGex,
+ "ecdh-sha2-nistp256": KexNistp256,
+ "ecdh-sha2-nistp384": KexNistp384,
+ "ecdh-sha2-nistp521": KexNistp521,
+ }
+ if KexCurve25519.is_available():
+ _kex_info["curve25519-sha256@libssh.org"] = KexCurve25519
+
+ _compression_info = {
+ # zlib@openssh.com is just zlib, but only turned on after a successful
+ # authentication. openssh servers may only offer this type because
+ # they've had troubles with security holes in zlib in the past.
+ "zlib@openssh.com": (ZlibCompressor, ZlibDecompressor),
+ "zlib": (ZlibCompressor, ZlibDecompressor),
+ "none": (None, None),
+ }
+
+ _modulus_pack = None
+ _active_check_timeout = 0.1
+
+ def __init__(
+ self,
+ sock,
+ default_window_size=DEFAULT_WINDOW_SIZE,
+ default_max_packet_size=DEFAULT_MAX_PACKET_SIZE,
+ gss_kex=False,
+ gss_deleg_creds=True,
+ disabled_algorithms=None,
+ server_sig_algs=True,
+ strict_kex=True,
+ packetizer_class=None,
+ ):
+ """
+ Create a new SSH session over an existing socket, or socket-like
+ object. This only creates the `.Transport` object; it doesn't begin
+ the SSH session yet. Use `connect` or `start_client` to begin a client
+ session, or `start_server` to begin a server session.
+
+ If the object is not actually a socket, it must have the following
+ methods:
+
+ - ``send(bytes)``: Writes from 1 to ``len(bytes)`` bytes, and returns
+ an int representing the number of bytes written. Returns
+ 0 or raises ``EOFError`` if the stream has been closed.
+ - ``recv(int)``: Reads from 1 to ``int`` bytes and returns them as a
+ string. Returns 0 or raises ``EOFError`` if the stream has been
+ closed.
+ - ``close()``: Closes the socket.
+ - ``settimeout(n)``: Sets a (float) timeout on I/O operations.
+
+ For ease of use, you may also pass in an address (as a tuple) or a host
+ string as the ``sock`` argument. (A host string is a hostname with an
+ optional port (separated by ``":"``) which will be converted into a
+ tuple of ``(hostname, port)``.) A socket will be connected to this
+ address and used for communication. Exceptions from the ``socket``
+ call may be thrown in this case.
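+
+        For example, a bare-bones client flow (host, port, and credentials
+        are placeholders) looks roughly like this::
+
+            t = Transport(("ssh.example.com", 22))
+            t.start_client()
+            t.auth_password("user", "secret")
+            chan = t.open_session()
+            chan.exec_command("uptime")
+            t.close()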
+
+ .. note::
+            Modifying the window and packet sizes might have adverse
+ effects on your channels created from this transport. The default
+ values are the same as in the OpenSSH code base and have been
+ battle tested.
+
+ :param socket sock:
+ a socket or socket-like object to create the session over.
+ :param int default_window_size:
+ sets the default window size on the transport. (defaults to
+ 2097152)
+ :param int default_max_packet_size:
+ sets the default max packet size on the transport. (defaults to
+ 32768)
+ :param bool gss_kex:
+ Whether to enable GSSAPI key exchange when GSSAPI is in play.
+ Default: ``False``.
+ :param bool gss_deleg_creds:
+ Whether to enable GSSAPI credential delegation when GSSAPI is in
+ play. Default: ``True``.
+ :param dict disabled_algorithms:
+ If given, must be a dictionary mapping algorithm type to an
+ iterable of algorithm identifiers, which will be disabled for the
+ lifetime of the transport.
+
+ Keys should match the last word in the class' builtin algorithm
+ tuple attributes, such as ``"ciphers"`` to disable names within
+ ``_preferred_ciphers``; or ``"kex"`` to disable something defined
+ inside ``_preferred_kex``. Values should exactly match members of
+ the matching attribute.
+
+ For example, if you need to disable
+ ``diffie-hellman-group16-sha512`` key exchange (perhaps because
+ your code talks to a server which implements it differently from
+ Paramiko), specify ``disabled_algorithms={"kex":
+ ["diffie-hellman-group16-sha512"]}``.
+ :param bool server_sig_algs:
+ Whether to send an extra message to compatible clients, in server
+ mode, with a list of supported pubkey algorithms. Default:
+ ``True``.
+ :param bool strict_kex:
+ Whether to advertise (and implement, if client also advertises
+ support for) a "strict kex" mode for safer handshaking. Default:
+ ``True``.
+ :param packetizer_class:
+ Which class to use for instantiating the internal packet handler.
+ Default: ``None`` (i.e.: use `Packetizer` as normal).
+
+ .. versionchanged:: 1.15
+ Added the ``default_window_size`` and ``default_max_packet_size``
+ arguments.
+ .. versionchanged:: 1.15
+ Added the ``gss_kex`` and ``gss_deleg_creds`` kwargs.
+ .. versionchanged:: 2.6
+ Added the ``disabled_algorithms`` kwarg.
+ .. versionchanged:: 2.9
+ Added the ``server_sig_algs`` kwarg.
+ .. versionchanged:: 3.4
+ Added the ``strict_kex`` kwarg.
+ .. versionchanged:: 3.4
+ Added the ``packetizer_class`` kwarg.
+ """
+ self.active = False
+ self.hostname = None
+ self.server_extensions = {}
+ self.advertise_strict_kex = strict_kex
+ self.agreed_on_strict_kex = False
+
+ # TODO: these two overrides on sock's type should go away sometime, too
+ # many ways to do it!
+ if isinstance(sock, str):
+ # convert "host:port" into (host, port)
+ hl = sock.split(":", 1)
+ self.hostname = hl[0]
+ if len(hl) == 1:
+ sock = (hl[0], 22)
+ else:
+ sock = (hl[0], int(hl[1]))
+ if type(sock) is tuple:
+ # connect to the given (host, port)
+ hostname, port = sock
+ self.hostname = hostname
+ reason = "No suitable address family"
+ addrinfos = socket.getaddrinfo(
+ hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM
+ )
+ for family, socktype, proto, canonname, sockaddr in addrinfos:
+ if socktype == socket.SOCK_STREAM:
+ af = family
+ # addr = sockaddr
+ sock = socket.socket(af, socket.SOCK_STREAM)
+ try:
+ sock.connect((hostname, port))
+ except socket.error as e:
+ reason = str(e)
+ else:
+ break
+ else:
+ raise SSHException(
+ "Unable to connect to {}: {}".format(hostname, reason)
+ )
+ # okay, normal socket-ish flow here...
+ threading.Thread.__init__(self)
+ self.daemon = True
+ self.sock = sock
+ # we set the timeout so we can check self.active periodically to
+ # see if we should bail. socket.timeout exception is never propagated.
+ self.sock.settimeout(self._active_check_timeout)
+
+ # negotiated crypto parameters
+ self.packetizer = (packetizer_class or Packetizer)(sock)
+ self.local_version = "SSH-" + self._PROTO_ID + "-" + self._CLIENT_ID
+ self.remote_version = ""
+ self.local_cipher = self.remote_cipher = ""
+ self.local_kex_init = self.remote_kex_init = None
+ self.local_mac = self.remote_mac = None
+ self.local_compression = self.remote_compression = None
+ self.session_id = None
+ self.host_key_type = None
+ self.host_key = None
+
+ # GSS-API / SSPI Key Exchange
+ self.use_gss_kex = gss_kex
+ # This will be set to True if GSS-API Key Exchange was performed
+ self.gss_kex_used = False
+ self.kexgss_ctxt = None
+ self.gss_host = None
+ if self.use_gss_kex:
+ self.kexgss_ctxt = GSSAuth("gssapi-keyex", gss_deleg_creds)
+ self._preferred_kex = self._preferred_gsskex + self._preferred_kex
+
+ # state used during negotiation
+ self.kex_engine = None
+ self.H = None
+ self.K = None
+
+ self.initial_kex_done = False
+ self.in_kex = False
+ self.authenticated = False
+ self._expected_packet = tuple()
+ # synchronization (always higher level than write_lock)
+ self.lock = threading.Lock()
+
+ # tracking open channels
+ self._channels = ChannelMap()
+ self.channel_events = {} # (id -> Event)
+ self.channels_seen = {} # (id -> True)
+ self._channel_counter = 0
+ self.default_max_packet_size = default_max_packet_size
+ self.default_window_size = default_window_size
+ self._forward_agent_handler = None
+ self._x11_handler = None
+ self._tcp_handler = None
+
+ self.saved_exception = None
+ self.clear_to_send = threading.Event()
+ self.clear_to_send_lock = threading.Lock()
+ self.clear_to_send_timeout = 30.0
+ self.log_name = "paramiko.transport"
+ self.logger = util.get_logger(self.log_name)
+ self.packetizer.set_log(self.logger)
+ self.auth_handler = None
+ # response Message from an arbitrary global request
+ self.global_response = None
+ # user-defined event callbacks
+ self.completion_event = None
+ # how long (seconds) to wait for the SSH banner
+ self.banner_timeout = 15
+ # how long (seconds) to wait for the handshake to finish after SSH
+ # banner sent.
+ self.handshake_timeout = 15
+ # how long (seconds) to wait for the auth response.
+ self.auth_timeout = 30
+ # how long (seconds) to wait for opening a channel
+ self.channel_timeout = 60 * 60
+ self.disabled_algorithms = disabled_algorithms or {}
+ self.server_sig_algs = server_sig_algs
+
+ # server mode:
+ self.server_mode = False
+ self.server_object = None
+ self.server_key_dict = {}
+ self.server_accepts = []
+ self.server_accept_cv = threading.Condition(self.lock)
+ self.subsystem_table = {}
+
+ # Handler table, now set at init time for easier per-instance
+ # manipulation and subclass twiddling.
+ self._handler_table = {
+ MSG_EXT_INFO: self._parse_ext_info,
+ MSG_NEWKEYS: self._parse_newkeys,
+ MSG_GLOBAL_REQUEST: self._parse_global_request,
+ MSG_REQUEST_SUCCESS: self._parse_request_success,
+ MSG_REQUEST_FAILURE: self._parse_request_failure,
+ MSG_CHANNEL_OPEN_SUCCESS: self._parse_channel_open_success,
+ MSG_CHANNEL_OPEN_FAILURE: self._parse_channel_open_failure,
+ MSG_CHANNEL_OPEN: self._parse_channel_open,
+ MSG_KEXINIT: self._negotiate_keys,
+ }
+
+ def _filter_algorithm(self, type_):
+ default = getattr(self, "_preferred_{}".format(type_))
+ return tuple(
+ x
+ for x in default
+ if x not in self.disabled_algorithms.get(type_, [])
+ )
+
+ @property
+ def preferred_ciphers(self):
+ return self._filter_algorithm("ciphers")
+
+ @property
+ def preferred_macs(self):
+ return self._filter_algorithm("macs")
+
+ @property
+ def preferred_keys(self):
+ # Interleave cert variants here; resistant to various background
+ # overwriting of _preferred_keys, and necessary as hostkeys can't use
+ # the logic pubkey auth does re: injecting/checking for certs at
+ # runtime
+ filtered = self._filter_algorithm("keys")
+ return tuple(
+ filtered
+ + tuple("{}-cert-v01@openssh.com".format(x) for x in filtered)
+ )
+
+ @property
+ def preferred_pubkeys(self):
+ return self._filter_algorithm("pubkeys")
+
+ @property
+ def preferred_kex(self):
+ return self._filter_algorithm("kex")
+
+ @property
+ def preferred_compression(self):
+ return self._filter_algorithm("compression")
+
+ def __repr__(self):
+ """
+ Returns a string representation of this object, for debugging.
+ """
+ id_ = hex(id(self) & xffffffff)
+ out = "<paramiko.Transport at {}".format(id_)
+ if not self.active:
+ out += " (unconnected)"
+ else:
+ if self.local_cipher != "":
+ out += " (cipher {}, {:d} bits)".format(
+ self.local_cipher,
+ self._cipher_info[self.local_cipher]["key-size"] * 8,
+ )
+ if self.is_authenticated():
+ out += " (active; {} open channel(s))".format(
+ len(self._channels)
+ )
+ elif self.initial_kex_done:
+ out += " (connected; awaiting auth)"
+ else:
+ out += " (connecting)"
+ out += ">"
+ return out
+
+ def atfork(self):
+ """
+ Terminate this Transport without closing the session. On posix
+ systems, if a Transport is open during process forking, both parent
+ and child will share the underlying socket, but only one process can
+ use the connection (without corrupting the session). Use this method
+ to clean up a Transport object without disrupting the other process.
+
+ .. versionadded:: 1.5.3
+ """
+ self.sock.close()
+ self.close()
+
+ def get_security_options(self):
+ """
+ Return a `.SecurityOptions` object which can be used to tweak the
+ encryption algorithms this transport will permit (for encryption,
+ digest/hash operations, public keys, and key exchanges) and the order
+ of preference for them.
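+
+ A small sketch of typical use (the host and port are placeholders, and
+ the cipher names are just an illustrative subset)::
+
+ import paramiko
+
+ t = paramiko.Transport(("ssh.example.com", 22))
+ opts = t.get_security_options()
+ # offer only these ciphers, in this order of preference
+ opts.ciphers = ("aes256-ctr", "aes128-ctr")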
+ """
+ return SecurityOptions(self)
+
+ def set_gss_host(self, gss_host, trust_dns=True, gssapi_requested=True):
+ """
+ Normalize/canonicalize ``self.gss_host`` depending on various factors.
+
+ :param str gss_host:
+ The explicitly requested GSS-oriented hostname to connect to (i.e.
+ what the host's name is in the Kerberos database.) Defaults to
+ ``self.hostname`` (which will be the 'real' target hostname and/or
+ host portion of given socket object.)
+ :param bool trust_dns:
+ Indicates whether or not DNS is trusted; if true, DNS will be used
+ to canonicalize the GSS hostname (which again will either be
+ ``gss_host`` or the transport's default hostname.)
+ (Defaults to True due to backwards compatibility.)
+ :param bool gssapi_requested:
+ Whether GSSAPI key exchange or authentication was even requested.
+ If not, this is a no-op and nothing happens
+ (and ``self.gss_host`` is not set.)
+ (Defaults to True due to backwards compatibility.)
+ :returns: ``None``.
+ """
+ # No GSSAPI in play == nothing to do
+ if not gssapi_requested:
+ return
+ # Obtain the correct host first - did user request a GSS-specific name
+ # to use that is distinct from the actual SSH target hostname?
+ if gss_host is None:
+ gss_host = self.hostname
+ # Finally, canonicalize via DNS if DNS is trusted.
+ if trust_dns and gss_host is not None:
+ gss_host = socket.getfqdn(gss_host)
+ # And set attribute for reference later.
+ self.gss_host = gss_host
+
+ def start_client(self, event=None, timeout=None):
+ """
+ Negotiate a new SSH2 session as a client. This is the first step after
+ creating a new `.Transport`. A separate thread is created for protocol
+ negotiation.
+
+ If an event is passed in, this method returns immediately. When
+ negotiation is done (successful or not), the given ``Event`` will
+ be triggered. On failure, `is_active` will return ``False``.
+
+ (Since 1.4) If ``event`` is ``None``, this method will not return until
+ negotiation is done. On success, the method returns normally.
+ Otherwise an SSHException is raised.
+
+ After a successful negotiation, you will usually want to authenticate,
+ calling `auth_password <Transport.auth_password>` or
+ `auth_publickey <Transport.auth_publickey>`.
+
+ .. note:: `connect` is a simpler method for connecting as a client.
+
+ .. note::
+ After calling this method (or `start_server` or `connect`), you
+ should no longer directly read from or write to the original socket
+ object.
+
+ :param .threading.Event event:
+ an event to trigger when negotiation is complete (optional)
+
+ :param float timeout:
+ a timeout, in seconds, for SSH2 session negotiation (optional)
+
+ :raises:
+ `.SSHException` -- if negotiation fails (and no ``event`` was
+ passed in)
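+
+ A rough sketch of the blocking form (host, credentials and timeout are
+ placeholders)::
+
+ import paramiko
+
+ t = paramiko.Transport(("ssh.example.com", 22))
+ t.start_client(timeout=10)
+ t.auth_password("someuser", "somepassword")
+ chan = t.open_session()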
+ """
+ self.active = True
+ if event is not None:
+ # async, return immediately and let the app poll for completion
+ self.completion_event = event
+ self.start()
+ return
+
+ # synchronous, wait for a result
+ self.completion_event = event = threading.Event()
+ self.start()
+ max_time = time.time() + timeout if timeout is not None else None
+ while True:
+ event.wait(0.1)
+ if not self.active:
+ e = self.get_exception()
+ if e is not None:
+ raise e
+ raise SSHException("Negotiation failed.")
+ if event.is_set() or (
+ timeout is not None and time.time() >= max_time
+ ):
+ break
+
+ def start_server(self, event=None, server=None):
+ """
+ Negotiate a new SSH2 session as a server. This is the first step after
+ creating a new `.Transport` and setting up your server host key(s). A
+ separate thread is created for protocol negotiation.
+
+ If an event is passed in, this method returns immediately. When
+ negotiation is done (successful or not), the given ``Event`` will
+ be triggered. On failure, `is_active` will return ``False``.
+
+ (Since 1.4) If ``event`` is ``None``, this method will not return until
+ negotiation is done. On success, the method returns normally.
+ Otherwise an SSHException is raised.
+
+ After a successful negotiation, the client will need to authenticate.
+ Override the methods `get_allowed_auths
+ <.ServerInterface.get_allowed_auths>`, `check_auth_none
+ <.ServerInterface.check_auth_none>`, `check_auth_password
+ <.ServerInterface.check_auth_password>`, and `check_auth_publickey
+ <.ServerInterface.check_auth_publickey>` in the given ``server`` object
+ to control the authentication process.
+
+ After a successful authentication, the client should request to open a
+ channel. Override `check_channel_request
+ <.ServerInterface.check_channel_request>` in the given ``server``
+ object to allow channels to be opened.
+
+ .. note::
+ After calling this method (or `start_client` or `connect`), you
+ should no longer directly read from or write to the original socket
+ object.
+
+ :param .threading.Event event:
+ an event to trigger when negotiation is complete.
+ :param .ServerInterface server:
+ an object used to perform authentication and create `channels
+ <.Channel>`
+
+ :raises:
+ `.SSHException` -- if negotiation fails (and no ``event`` was
+ passed in)
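+
+ Very roughly, a server-side sketch (``client_sock`` and the
+ ``MyServer`` subclass of `.ServerInterface` are assumed to exist in
+ your own code, and the key path is a placeholder)::
+
+ t = paramiko.Transport(client_sock)
+ t.add_server_key(paramiko.RSAKey.from_private_key_file("host_rsa_key"))
+ t.start_server(server=MyServer())
+ chan = t.accept(timeout=20)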
+ """
+ if server is None:
+ server = ServerInterface()
+ self.server_mode = True
+ self.server_object = server
+ self.active = True
+ if event is not None:
+ # async, return immediately and let the app poll for completion
+ self.completion_event = event
+ self.start()
+ return
+
+ # synchronous, wait for a result
+ self.completion_event = event = threading.Event()
+ self.start()
+ while True:
+ event.wait(0.1)
+ if not self.active:
+ e = self.get_exception()
+ if e is not None:
+ raise e
+ raise SSHException("Negotiation failed.")
+ if event.is_set():
+ break
+
+ def add_server_key(self, key):
+ """
+ Add a host key to the list of keys used for server mode. When behaving
+ as a server, the host key is used to sign certain packets during the
+ SSH2 negotiation, so that the client can trust that we are who we say
+ we are. Because this is used for signing, the key must contain private
+ key info, not just the public half. Only one key of each type (RSA or
+ DSS) is kept.
+
+ :param .PKey key:
+ the host key to add, usually an `.RSAKey` or `.DSSKey`.
+ """
+ self.server_key_dict[key.get_name()] = key
+ # Handle SHA-2 extensions for RSA by ensuring that lookups into
+ # self.server_key_dict will yield this key for any of the algorithm
+ # names.
+ if isinstance(key, RSAKey):
+ self.server_key_dict["rsa-sha2-256"] = key
+ self.server_key_dict["rsa-sha2-512"] = key
+
+ def get_server_key(self):
+ """
+ Return the active host key, in server mode. After negotiating with the
+ client, this method will return the negotiated host key. If only one
+ type of host key was set with `add_server_key`, that's the only key
+ that will ever be returned. But in cases where you have set more than
+ one type of host key (for example, an RSA key and a DSS key), the key
+ type will be negotiated by the client, and this method will return the
+ key of the type agreed on. If the host key has not been negotiated
+ yet, ``None`` is returned. In client mode, the behavior is undefined.
+
+ :return:
+ host key (`.PKey`) of the type negotiated by the client, or
+ ``None``.
+ """
+ try:
+ return self.server_key_dict[self.host_key_type]
+ except KeyError:
+ pass
+ return None
+
+ @staticmethod
+ def load_server_moduli(filename=None):
+ """
+ (optional)
+ Load a file of prime moduli for use in doing group-exchange key
+ negotiation in server mode. It's a rather obscure option and can be
+ safely ignored.
+
+ In server mode, the remote client may request "group-exchange" key
+ negotiation, which asks the server to send a random prime number that
+ fits certain criteria. These primes are pretty difficult to compute,
+ so they can't be generated on demand. But many systems contain a file
+ of suitable primes (usually named something like ``/etc/ssh/moduli``).
+ If you call `load_server_moduli` and it returns ``True``, then this
+ file of primes has been loaded and we will support "group-exchange" in
+ server mode. Otherwise server mode will just claim that it doesn't
+ support that method of key negotiation.
+
+ :param str filename:
+ optional path to the moduli file, if you happen to know that it's
+ not in a standard location.
+ :return:
+ True if a moduli file was successfully loaded; False otherwise.
+
+ .. note:: This has no effect when used in client mode.
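+
+ Typically called once at startup, e.g.::
+
+ have_moduli = paramiko.Transport.load_server_moduli()
+ # if False, group-exchange kex simply won't be offered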
+ """
+ Transport._modulus_pack = ModulusPack()
+ # places to look for the openssh "moduli" file
+ file_list = ["/etc/ssh/moduli", "/usr/local/etc/moduli"]
+ if filename is not None:
+ file_list.insert(0, filename)
+ for fn in file_list:
+ try:
+ Transport._modulus_pack.read_file(fn)
+ return True
+ except IOError:
+ pass
+ # none succeeded
+ Transport._modulus_pack = None
+ return False
+
+ def close(self):
+ """
+ Close this session, and any open channels that are tied to it.
+ """
+ if not self.active:
+ return
+ self.stop_thread()
+ for chan in list(self._channels.values()):
+ chan._unlink()
+ self.sock.close()
+
+ def get_remote_server_key(self):
+ """
+ Return the host key of the server (in client mode).
+
+ .. note::
+ Previously this call returned a tuple of ``(key type, key
+ string)``. You can get the same effect by calling `.PKey.get_name`
+ for the key type, and ``str(key)`` for the key string.
+
+ :raises: `.SSHException` -- if no session is currently active.
+
+ :return: public key (`.PKey`) of the remote server
+ """
+ if (not self.active) or (not self.initial_kex_done):
+ raise SSHException("No existing session")
+ return self.host_key
+
+ def is_active(self):
+ """
+ Return true if this session is active (open).
+
+ :return:
+ True if the session is still active (open); False if the session is
+ closed
+ """
+ return self.active
+
+ def open_session(
+ self, window_size=None, max_packet_size=None, timeout=None
+ ):
+ """
+ Request a new channel to the server, of type ``"session"``. This is
+ just an alias for calling `open_channel` with an argument of
+ ``"session"``.
+
+ .. note:: Modifying the window and packet sizes might have adverse
+ effects on the session created. The default values are the same
+ as in the OpenSSH code base and have been battle tested.
+
+ :param int window_size:
+ optional window size for this session.
+ :param int max_packet_size:
+ optional max packet size for this session.
+
+ :return: a new `.Channel`
+
+ :raises:
+ `.SSHException` -- if the request is rejected or the session ends
+ prematurely
+
+ .. versionchanged:: 1.13.4/1.14.3/1.15.3
+ Added the ``timeout`` argument.
+ .. versionchanged:: 1.15
+ Added the ``window_size`` and ``max_packet_size`` arguments.
+ """
+ return self.open_channel(
+ "session",
+ window_size=window_size,
+ max_packet_size=max_packet_size,
+ timeout=timeout,
+ )
+
+ def open_x11_channel(self, src_addr=None):
+ """
+ Request a new channel to the client, of type ``"x11"``. This
+ is just an alias for ``open_channel('x11', src_addr=src_addr)``.
+
+ :param tuple src_addr:
+ the source address (``(str, int)``) of the x11 server (port is the
+ x11 port, ie. 6010)
+ :return: a new `.Channel`
+
+ :raises:
+ `.SSHException` -- if the request is rejected or the session ends
+ prematurely
+ """
+ return self.open_channel("x11", src_addr=src_addr)
+
+ def open_forward_agent_channel(self):
+ """
+ Request a new channel to the client, of type
+ ``"auth-agent@openssh.com"``.
+
+ This is just an alias for ``open_channel('auth-agent@openssh.com')``.
+
+ :return: a new `.Channel`
+
+ :raises: `.SSHException` --
+ if the request is rejected or the session ends prematurely
+ """
+ return self.open_channel("auth-agent@openssh.com")
+
+ def open_forwarded_tcpip_channel(self, src_addr, dest_addr):
+ """
+ Request a new channel back to the client, of type ``forwarded-tcpip``.
+
+ This is used after a client has requested port forwarding, for sending
+ incoming connections back to the client.
+
+ :param src_addr: originator's address
+ :param dest_addr: local (server) connected address
+ """
+ return self.open_channel("forwarded-tcpip", dest_addr, src_addr)
+
+ def open_channel(
+ self,
+ kind,
+ dest_addr=None,
+ src_addr=None,
+ window_size=None,
+ max_packet_size=None,
+ timeout=None,
+ ):
+ """
+ Request a new channel to the server. `Channels <.Channel>` are
+ socket-like objects used for the actual transfer of data across the
+ session. You may only request a channel after negotiating encryption
+ (using `connect` or `start_client`) and authenticating.
+
+ .. note:: Modifying the window and packet sizes might have adverse
+ effects on the channel created. The default values are the same
+ as in the OpenSSH code base and have been battle tested.
+
+ :param str kind:
+ the kind of channel requested (usually ``"session"``,
+ ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``)
+ :param tuple dest_addr:
+ the destination address (address + port tuple) of this port
+ forwarding, if ``kind`` is ``"forwarded-tcpip"`` or
+ ``"direct-tcpip"`` (ignored for other channel types)
+ :param src_addr: the source address of this port forwarding, if
+ ``kind`` is ``"forwarded-tcpip"``, ``"direct-tcpip"``, or ``"x11"``
+ :param int window_size:
+ optional window size for this session.
+ :param int max_packet_size:
+ optional max packet size for this session.
+ :param float timeout:
+ optional timeout opening a channel, default 3600s (1h)
+
+ :return: a new `.Channel` on success
+
+ :raises:
+ `.SSHException` -- if the request is rejected, the session ends
+ prematurely or there is a timeout opening a channel
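+
+ For example, a ``direct-tcpip`` channel (``transport`` is an already
+ authenticated `.Transport`; the addresses are illustrative)::
+
+ chan = transport.open_channel(
+ "direct-tcpip",
+ dest_addr=("10.0.0.5", 80),
+ src_addr=("127.0.0.1", 0),
+ )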
+
+ .. versionchanged:: 1.15
+ Added the ``window_size`` and ``max_packet_size`` arguments.
+ """
+ if not self.active:
+ raise SSHException("SSH session not active")
+ timeout = self.channel_timeout if timeout is None else timeout
+ self.lock.acquire()
+ try:
+ window_size = self._sanitize_window_size(window_size)
+ max_packet_size = self._sanitize_packet_size(max_packet_size)
+ chanid = self._next_channel()
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_OPEN)
+ m.add_string(kind)
+ m.add_int(chanid)
+ m.add_int(window_size)
+ m.add_int(max_packet_size)
+ if (kind == "forwarded-tcpip") or (kind == "direct-tcpip"):
+ m.add_string(dest_addr[0])
+ m.add_int(dest_addr[1])
+ m.add_string(src_addr[0])
+ m.add_int(src_addr[1])
+ elif kind == "x11":
+ m.add_string(src_addr[0])
+ m.add_int(src_addr[1])
+ chan = Channel(chanid)
+ self._channels.put(chanid, chan)
+ self.channel_events[chanid] = event = threading.Event()
+ self.channels_seen[chanid] = True
+ chan._set_transport(self)
+ chan._set_window(window_size, max_packet_size)
+ finally:
+ self.lock.release()
+ self._send_user_message(m)
+ start_ts = time.time()
+ while True:
+ event.wait(0.1)
+ if not self.active:
+ e = self.get_exception()
+ if e is None:
+ e = SSHException("Unable to open channel.")
+ raise e
+ if event.is_set():
+ break
+ elif start_ts + timeout < time.time():
+ raise SSHException("Timeout opening channel.")
+ chan = self._channels.get(chanid)
+ if chan is not None:
+ return chan
+ e = self.get_exception()
+ if e is None:
+ e = SSHException("Unable to open channel.")
+ raise e
+
+ def request_port_forward(self, address, port, handler=None):
+ """
+ Ask the server to forward TCP connections from a listening port on
+ the server, across this SSH session.
+
+ If a handler is given, that handler is called from a different thread
+ whenever a forwarded connection arrives. The handler parameters are::
+
+ handler(
+ channel,
+ (origin_addr, origin_port),
+ (server_addr, server_port),
+ )
+
+ where ``server_addr`` and ``server_port`` are the address and port that
+ the server was listening on.
+
+ If no handler is set, the default behavior is to send new incoming
+ forwarded connections into the accept queue, to be picked up via
+ `accept`.
+
+ :param str address: the address to bind when forwarding
+ :param int port:
+ the port to forward, or 0 to ask the server to allocate any port
+ :param callable handler:
+ optional handler for incoming forwarded connections, of the form
+ ``func(Channel, (str, int), (str, int))``.
+
+ :return: the port number (`int`) allocated by the server
+
+ :raises:
+ `.SSHException` -- if the server refused the TCP forward request
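+
+ A sketch of the handler-based form (the handler name and port are
+ arbitrary)::
+
+ def on_forwarded(channel, origin, server):
+     channel.send(b"hello from the client side\r\n")
+     channel.close()
+
+ port = transport.request_port_forward("", 8022, on_forwarded)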
+ """
+ if not self.active:
+ raise SSHException("SSH session not active")
+ port = int(port)
+ response = self.global_request(
+ "tcpip-forward", (address, port), wait=True
+ )
+ if response is None:
+ raise SSHException("TCP forwarding request denied")
+ if port == 0:
+ port = response.get_int()
+ if handler is None:
+
+ def default_handler(channel, src_addr, dest_addr_port):
+ # src_addr, src_port = src_addr_port
+ # dest_addr, dest_port = dest_addr_port
+ self._queue_incoming_channel(channel)
+
+ handler = default_handler
+ self._tcp_handler = handler
+ return port
+
+ def cancel_port_forward(self, address, port):
+ """
+ Ask the server to cancel a previous port-forwarding request. No more
+ connections to the given address & port will be forwarded across this
+ ssh connection.
+
+ :param str address: the address to stop forwarding
+ :param int port: the port to stop forwarding
+ """
+ if not self.active:
+ return
+ self._tcp_handler = None
+ self.global_request("cancel-tcpip-forward", (address, port), wait=True)
+
+ def open_sftp_client(self):
+ """
+ Create an SFTP client channel from an open transport. On success, an
+ SFTP session will be opened with the remote host, and a new
+ `.SFTPClient` object will be returned.
+
+ :return:
+ a new `.SFTPClient` referring to an sftp session (channel) across
+ this transport
+ """
+ return SFTPClient.from_transport(self)
+
+ def send_ignore(self, byte_count=None):
+ """
+ Send a junk packet across the encrypted link. This is sometimes used
+ to add "noise" to a connection to confuse would-be attackers. It can
+ also be used as a keep-alive for long lived connections traversing
+ firewalls.
+
+ :param int byte_count:
+ the number of random bytes to send in the payload of the ignored
+ packet -- defaults to a random number from 10 to 41.
+ """
+ m = Message()
+ m.add_byte(cMSG_IGNORE)
+ if byte_count is None:
+ byte_count = (byte_ord(os.urandom(1)) % 32) + 10
+ m.add_bytes(os.urandom(byte_count))
+ self._send_user_message(m)
+
+ def renegotiate_keys(self):
+ """
+ Force this session to switch to new keys. Normally this is done
+ automatically after the session hits a certain number of packets or
+ bytes sent or received, but this method gives you the option of forcing
+ new keys whenever you want. Negotiating new keys causes a pause in
+ traffic both ways as the two sides swap keys and do computations. This
+ method returns when the session has switched to new keys.
+
+ :raises:
+ `.SSHException` -- if the key renegotiation failed (which causes
+ the session to end)
+ """
+ self.completion_event = threading.Event()
+ self._send_kex_init()
+ while True:
+ self.completion_event.wait(0.1)
+ if not self.active:
+ e = self.get_exception()
+ if e is not None:
+ raise e
+ raise SSHException("Negotiation failed.")
+ if self.completion_event.is_set():
+ break
+ return
+
+ def set_keepalive(self, interval):
+ """
+ Turn on/off keepalive packets (default is off). If this is set, after
+ ``interval`` seconds without sending any data over the connection, a
+ "keepalive" packet will be sent (and ignored by the remote host). This
+ can be useful to keep connections alive over a NAT, for example.
+
+ :param int interval:
+ seconds to wait before sending a keepalive packet (or
+ 0 to disable keepalives).
+ """
+
+ def _request(x=weakref.proxy(self)):
+ return x.global_request("keepalive@lag.net", wait=False)
+
+ self.packetizer.set_keepalive(interval, _request)
+
+ def global_request(self, kind, data=None, wait=True):
+ """
+ Make a global request to the remote host. These are normally
+ extensions to the SSH2 protocol.
+
+ :param str kind: name of the request.
+ :param tuple data:
+ an optional tuple containing additional data to attach to the
+ request.
+ :param bool wait:
+ ``True`` if this method should not return until a response is
+ received; ``False`` otherwise.
+ :return:
+ a `.Message` containing possible additional data if the request was
+ successful; ``None`` if the request was denied or if ``wait`` was
+ ``False``.
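+
+ For instance (the request name here is made up purely for
+ illustration; a real server will most likely deny it)::
+
+ reply = transport.global_request("demo@example.com", wait=True)
+ if reply is None:
+     print("request was denied")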
+ """
+ if wait:
+ self.completion_event = threading.Event()
+ m = Message()
+ m.add_byte(cMSG_GLOBAL_REQUEST)
+ m.add_string(kind)
+ m.add_boolean(wait)
+ if data is not None:
+ m.add(*data)
+ self._log(DEBUG, 'Sending global request "{}"'.format(kind))
+ self._send_user_message(m)
+ if not wait:
+ return None
+ while True:
+ self.completion_event.wait(0.1)
+ if not self.active:
+ return None
+ if self.completion_event.is_set():
+ break
+ return self.global_response
+
+ def accept(self, timeout=None):
+ """
+ Return the next channel opened by the client over this transport, in
+ server mode. If no channel is opened before the given timeout,
+ ``None`` is returned.
+
+ :param int timeout:
+ seconds to wait for a channel, or ``None`` to wait forever
+ :return: a new `.Channel` opened by the client
+ """
+ self.lock.acquire()
+ try:
+ if len(self.server_accepts) > 0:
+ chan = self.server_accepts.pop(0)
+ else:
+ self.server_accept_cv.wait(timeout)
+ if len(self.server_accepts) > 0:
+ chan = self.server_accepts.pop(0)
+ else:
+ # timeout
+ chan = None
+ finally:
+ self.lock.release()
+ return chan
+
+ def connect(
+ self,
+ hostkey=None,
+ username="",
+ password=None,
+ pkey=None,
+ gss_host=None,
+ gss_auth=False,
+ gss_kex=False,
+ gss_deleg_creds=True,
+ gss_trust_dns=True,
+ ):
+ """
+ Negotiate an SSH2 session, and optionally verify the server's host key
+ and authenticate using a password or private key. This is a shortcut
+ for `start_client`, `get_remote_server_key`, and
+ `Transport.auth_password` or `Transport.auth_publickey`. Use those
+ methods if you want more control.
+
+ You can use this method immediately after creating a Transport to
+ negotiate encryption with a server. If it fails, an exception will be
+ thrown. On success, the method will return cleanly, and an encrypted
+ session exists. You may immediately call `open_channel` or
+ `open_session` to get a `.Channel` object, which is used for data
+ transfer.
+
+ .. note::
+ If you fail to supply a password or private key, this method may
+ succeed, but a subsequent `open_channel` or `open_session` call may
+ fail because you haven't authenticated yet.
+
+ :param .PKey hostkey:
+ the host key expected from the server, or ``None`` if you don't
+ want to do host key verification.
+ :param str username: the username to authenticate as.
+ :param str password:
+ a password to use for authentication, if you want to use password
+ authentication; otherwise ``None``.
+ :param .PKey pkey:
+ a private key to use for authentication, if you want to use private
+ key authentication; otherwise ``None``.
+ :param str gss_host:
+ The target's name in the kerberos database. Default: hostname
+ :param bool gss_auth:
+ ``True`` if you want to use GSS-API authentication.
+ :param bool gss_kex:
+ Perform GSS-API Key Exchange and user authentication.
+ :param bool gss_deleg_creds:
+ Whether to delegate GSS-API client credentials.
+ :param gss_trust_dns:
+ Indicates whether or not the DNS is trusted to securely
+ canonicalize the name of the host being connected to (default
+ ``True``).
+
+ :raises: `.SSHException` -- if the SSH2 negotiation fails, the host key
+ supplied by the server is incorrect, or authentication fails.
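+
+ A typical client-side sketch (host, username and key path are
+ placeholders; omitting ``hostkey`` skips host key verification)::
+
+ import paramiko
+
+ t = paramiko.Transport(("ssh.example.com", 22))
+ t.connect(
+ username="someuser",
+ pkey=paramiko.RSAKey.from_private_key_file("/path/to/id_rsa"),
+ )
+ chan = t.open_session()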
+
+ .. versionchanged:: 2.3
+ Added the ``gss_trust_dns`` argument.
+ """
+ if hostkey is not None:
+ # TODO: a more robust implementation would be to ask each key class
+ # for its names (plural), and just use that.
+ # TODO: that could be used in a bunch of other spots too
+ if isinstance(hostkey, RSAKey):
+ self._preferred_keys = [
+ "rsa-sha2-512",
+ "rsa-sha2-256",
+ "ssh-rsa",
+ ]
+ else:
+ self._preferred_keys = [hostkey.get_name()]
+
+ self.set_gss_host(
+ gss_host=gss_host,
+ trust_dns=gss_trust_dns,
+ gssapi_requested=gss_kex or gss_auth,
+ )
+
+ self.start_client()
+
+ # check host key if we were given one
+ # If GSS-API Key Exchange was performed, we are not required to check
+ # the host key.
+ if (hostkey is not None) and not gss_kex:
+ key = self.get_remote_server_key()
+ if (
+ key.get_name() != hostkey.get_name()
+ or key.asbytes() != hostkey.asbytes()
+ ):
+ self._log(DEBUG, "Bad host key from server")
+ self._log(
+ DEBUG,
+ "Expected: {}: {}".format(
+ hostkey.get_name(), repr(hostkey.asbytes())
+ ),
+ )
+ self._log(
+ DEBUG,
+ "Got : {}: {}".format(
+ key.get_name(), repr(key.asbytes())
+ ),
+ )
+ raise SSHException("Bad host key from server")
+ self._log(
+ DEBUG, "Host key verified ({})".format(hostkey.get_name())
+ )
+
+ if (pkey is not None) or (password is not None) or gss_auth or gss_kex:
+ if gss_auth:
+ self._log(
+ DEBUG, "Attempting GSS-API auth... (gssapi-with-mic)"
+ ) # noqa
+ self.auth_gssapi_with_mic(
+ username, self.gss_host, gss_deleg_creds
+ )
+ elif gss_kex:
+ self._log(DEBUG, "Attempting GSS-API auth... (gssapi-keyex)")
+ self.auth_gssapi_keyex(username)
+ elif pkey is not None:
+ self._log(DEBUG, "Attempting public-key auth...")
+ self.auth_publickey(username, pkey)
+ else:
+ self._log(DEBUG, "Attempting password auth...")
+ self.auth_password(username, password)
+
+ return
+
+ def get_exception(self):
+ """
+ Return any exception that happened during the last server request.
+ This can be used to fetch more specific error information after using
+ calls like `start_client`. The exception (if any) is cleared after
+ this call.
+
+ :return:
+ an exception, or ``None`` if there is no stored exception.
+
+ .. versionadded:: 1.1
+ """
+ self.lock.acquire()
+ try:
+ e = self.saved_exception
+ self.saved_exception = None
+ return e
+ finally:
+ self.lock.release()
+
+ def set_subsystem_handler(self, name, handler, *args, **kwargs):
+ """
+ Set the handler class for a subsystem in server mode. If a request
+ for this subsystem is made on an open ssh channel later, this handler
+ will be constructed and called -- see `.SubsystemHandler` for more
+ detailed documentation.
+
+ Any extra parameters (including keyword arguments) are saved and
+ passed to the `.SubsystemHandler` constructor later.
+
+ :param str name: name of the subsystem.
+ :param handler:
+ subclass of `.SubsystemHandler` that handles this subsystem.
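+
+ For example, serving SFTP with the bundled `.SFTPServer` (where
+ ``MySFTPInterface`` is your own `.SFTPServerInterface` subclass)::
+
+ transport.set_subsystem_handler(
+ "sftp", paramiko.SFTPServer, MySFTPInterface
+ )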
+ """
+ try:
+ self.lock.acquire()
+ self.subsystem_table[name] = (handler, args, kwargs)
+ finally:
+ self.lock.release()
+
+ def is_authenticated(self):
+ """
+ Return true if this session is active and authenticated.
+
+ :return:
+ True if the session is still open and has been authenticated
+ successfully; False if authentication failed and/or the session is
+ closed.
+ """
+ return (
+ self.active
+ and self.auth_handler is not None
+ and self.auth_handler.is_authenticated()
+ )
+
+ def get_username(self):
+ """
+ Return the username this connection is authenticated for. If the
+ session is not authenticated (or authentication failed), this method
+ returns ``None``.
+
+ :return: username that was authenticated (a `str`), or ``None``.
+ """
+ if not self.active or (self.auth_handler is None):
+ return None
+ return self.auth_handler.get_username()
+
+ def get_banner(self):
+ """
+ Return the banner supplied by the server upon connect. If no banner is
+ supplied, this method returns ``None``.
+
+ :returns: server supplied banner (`str`), or ``None``.
+
+ .. versionadded:: 1.13
+ """
+ if not self.active or (self.auth_handler is None):
+ return None
+ return self.auth_handler.banner
+
+ def auth_none(self, username):
+ """
+ Try to authenticate to the server using no authentication at all.
+ This will almost always fail. It may be useful for determining the
+ list of authentication types supported by the server, by catching the
+ `.BadAuthenticationType` exception raised.
+
+ :param str username: the username to authenticate as
+ :return:
+ list of auth types permissible for the next stage of
+ authentication (normally empty)
+
+ :raises:
+ `.BadAuthenticationType` -- if "none" authentication isn't allowed
+ by the server for this user
+ :raises:
+ `.SSHException` -- if the authentication failed due to a network
+ error
+
+ .. versionadded:: 1.5
+ """
+ if (not self.active) or (not self.initial_kex_done):
+ raise SSHException("No existing session")
+ my_event = threading.Event()
+ self.auth_handler = AuthHandler(self)
+ self.auth_handler.auth_none(username, my_event)
+ return self.auth_handler.wait_for_response(my_event)
+
+ def auth_password(self, username, password, event=None, fallback=True):
+ """
+ Authenticate to the server using a password. The username and password
+ are sent over an encrypted link.
+
+ If an ``event`` is passed in, this method will return immediately, and
+ the event will be triggered once authentication succeeds or fails. On
+ success, `is_authenticated` will return ``True``. On failure, you may
+ use `get_exception` to get more detailed error information.
+
+ Since 1.1, if no event is passed, this method will block until the
+ authentication succeeds or fails. On failure, an exception is raised.
+ Otherwise, the method simply returns.
+
+ Since 1.5, if no event is passed and ``fallback`` is ``True`` (the
+ default), if the server doesn't support plain password authentication
+ but does support so-called "keyboard-interactive" mode, an attempt
+ will be made to authenticate using this interactive mode. If it fails,
+ the normal exception will be thrown as if the attempt had never been
+ made. This is useful for some recent Gentoo and Debian distributions,
+ which turn off plain password authentication in a misguided belief
+ that interactive authentication is "more secure". (It's not.)
+
+ If the server requires multi-step authentication (which is very rare),
+ this method will return a list of auth types permissible for the next
+ step. Otherwise, in the normal case, an empty list is returned.
+
+ :param str username: the username to authenticate as
+ :param str password: the password to authenticate with
+ :param .threading.Event event:
+ an event to trigger when the authentication attempt is complete
+ (whether it was successful or not)
+ :param bool fallback:
+ ``True`` if an attempt at an automated "interactive" password auth
+ should be made if the server doesn't support normal password auth
+ :return:
+ list of auth types permissible for the next stage of
+ authentication (normally empty)
+
+ :raises:
+ `.BadAuthenticationType` -- if password authentication isn't
+ allowed by the server for this user (and no event was passed in)
+ :raises:
+ `.AuthenticationException` -- if the authentication failed (and no
+ event was passed in)
+ :raises: `.SSHException` -- if there was a network error
+ """
+ if (not self.active) or (not self.initial_kex_done):
+ # we should never try to send the password unless we're on a secure
+ # link
+ raise SSHException("No existing session")
+ if event is None:
+ my_event = threading.Event()
+ else:
+ my_event = event
+ self.auth_handler = AuthHandler(self)
+ self.auth_handler.auth_password(username, password, my_event)
+ if event is not None:
+ # caller wants to wait for event themselves
+ return []
+ try:
+ return self.auth_handler.wait_for_response(my_event)
+ except BadAuthenticationType as e:
+ # if password auth isn't allowed, but keyboard-interactive *is*,
+ # try to fudge it
+ if not fallback or ("keyboard-interactive" not in e.allowed_types):
+ raise
+ try:
+
+ def handler(title, instructions, fields):
+ if len(fields) > 1:
+ raise SSHException("Fallback authentication failed.")
+ if len(fields) == 0:
+ # for some reason, at least on os x, a 2nd request will
+ # be made with zero fields requested. maybe it's just
+ # to try to fake out automated scripting of the exact
+ # type we're doing here. *shrug* :)
+ return []
+ return [password]
+
+ return self.auth_interactive(username, handler)
+ except SSHException:
+ # attempt failed; just raise the original exception
+ raise e
+
+ def auth_publickey(self, username, key, event=None):
+ """
+ Authenticate to the server using a private key. The key is used to
+ sign data from the server, so it must include the private part.
+
+ If an ``event`` is passed in, this method will return immediately, and
+ the event will be triggered once authentication succeeds or fails. On
+ success, `is_authenticated` will return ``True``. On failure, you may
+ use `get_exception` to get more detailed error information.
+
+ Since 1.1, if no event is passed, this method will block until the
+ authentication succeeds or fails. On failure, an exception is raised.
+ Otherwise, the method simply returns.
+
+ If the server requires multi-step authentication (which is very rare),
+ this method will return a list of auth types permissible for the next
+ step. Otherwise, in the normal case, an empty list is returned.
+
+ :param str username: the username to authenticate as
+ :param .PKey key: the private key to authenticate with
+ :param .threading.Event event:
+ an event to trigger when the authentication attempt is complete
+ (whether it was successful or not)
+ :return:
+ list of auth types permissible for the next stage of
+ authentication (normally empty)
+
+ :raises:
+ `.BadAuthenticationType` -- if public-key authentication isn't
+ allowed by the server for this user (and no event was passed in)
+ :raises:
+ `.AuthenticationException` -- if the authentication failed (and no
+ event was passed in)
+ :raises: `.SSHException` -- if there was a network error
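+
+ For example, with a key loaded from disk (the path is a placeholder)::
+
+ key = paramiko.Ed25519Key.from_private_key_file("/path/to/id_ed25519")
+ transport.auth_publickey("someuser", key)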
+ """
+ if (not self.active) or (not self.initial_kex_done):
+ # we should never try to authenticate unless we're on a secure link
+ raise SSHException("No existing session")
+ if event is None:
+ my_event = threading.Event()
+ else:
+ my_event = event
+ self.auth_handler = AuthHandler(self)
+ self.auth_handler.auth_publickey(username, key, my_event)
+ if event is not None:
+ # caller wants to wait for event themselves
+ return []
+ return self.auth_handler.wait_for_response(my_event)
+
+ def auth_interactive(self, username, handler, submethods=""):
+ """
+ Authenticate to the server interactively. A handler is used to answer
+ arbitrary questions from the server. On many servers, this is just a
+ dumb wrapper around PAM.
+
+ This method will block until the authentication succeeds or fails,
+ periodically calling the handler asynchronously to get answers to
+ authentication questions. The handler may be called more than once
+ if the server continues to ask questions.
+
+ The handler is expected to be a callable that will handle calls of the
+ form: ``handler(title, instructions, prompt_list)``. The ``title`` is
+ meant to be a dialog-window title, and the ``instructions`` are user
+ instructions (both are strings). ``prompt_list`` will be a list of
+ prompts, each prompt being a tuple of ``(str, bool)``. The string is
+ the prompt and the boolean indicates whether the user text should be
+ echoed.
+
+ A sample call would thus be:
+ ``handler('title', 'instructions', [('Password:', False)])``.
+
+ The handler should return a list or tuple of answers to the server's
+ questions.
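+
+ A minimal terminal-based handler might look like this (``getpass`` is
+ used only to suppress echo for hidden prompts)::
+
+ from getpass import getpass
+
+ def handler(title, instructions, prompt_list):
+     answers = []
+     for prompt, show_input in prompt_list:
+         answers.append(input(prompt) if show_input else getpass(prompt))
+     return answers
+
+ transport.auth_interactive("someuser", handler)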
+
+ If the server requires multi-step authentication (which is very rare),
+ this method will return a list of auth types permissible for the next
+ step. Otherwise, in the normal case, an empty list is returned.
+
+ :param str username: the username to authenticate as
+ :param callable handler: a handler for responding to server questions
+ :param str submethods: a string list of desired submethods (optional)
+ :return:
+ list of auth types permissible for the next stage of
+ authentication (normally empty).
+
+ :raises: `.BadAuthenticationType` -- if "keyboard-interactive"
+ authentication isn't allowed by the server for this user
+ :raises: `.AuthenticationException` -- if the authentication failed
+ :raises: `.SSHException` -- if there was a network error
+
+ .. versionadded:: 1.5
+ """
+ if (not self.active) or (not self.initial_kex_done):
+ # we should never try to authenticate unless we're on a secure link
+ raise SSHException("No existing session")
+ my_event = threading.Event()
+ self.auth_handler = AuthHandler(self)
+ self.auth_handler.auth_interactive(
+ username, handler, my_event, submethods
+ )
+ return self.auth_handler.wait_for_response(my_event)
+
+ def auth_interactive_dumb(self, username, handler=None, submethods=""):
+ """
+ Authenticate to the server interactively but dumber.
+ Just print the prompt and/or instructions to stdout and send back
+ the response. This is good for situations where partial auth is
+ achieved by key and then the user has to enter a second-factor token.
+ """
+
+ if not handler:
+
+ def handler(title, instructions, prompt_list):
+ answers = []
+ if title:
+ print(title.strip())
+ if instructions:
+ print(instructions.strip())
+ for prompt, show_input in prompt_list:
+ print(prompt.strip(), end=" ")
+ answers.append(input())
+ return answers
+
+ return self.auth_interactive(username, handler, submethods)
+
+ def auth_gssapi_with_mic(self, username, gss_host, gss_deleg_creds):
+ """
+ Authenticate to the Server using GSS-API / SSPI.
+
+ :param str username: The username to authenticate as
+ :param str gss_host: The target host
+ :param bool gss_deleg_creds: Delegate credentials or not
+ :return: list of auth types permissible for the next stage of
+ authentication (normally empty)
+ :raises: `.BadAuthenticationType` -- if gssapi-with-mic isn't
+ allowed by the server (and no event was passed in)
+ :raises:
+ `.AuthenticationException` -- if the authentication failed (and no
+ event was passed in)
+ :raises: `.SSHException` -- if there was a network error
+ """
+ if (not self.active) or (not self.initial_kex_done):
+ # we should never try to authenticate unless we're on a secure link
+ raise SSHException("No existing session")
+ my_event = threading.Event()
+ self.auth_handler = AuthHandler(self)
+ self.auth_handler.auth_gssapi_with_mic(
+ username, gss_host, gss_deleg_creds, my_event
+ )
+ return self.auth_handler.wait_for_response(my_event)
+
+ def auth_gssapi_keyex(self, username):
+ """
+ Authenticate to the server with GSS-API/SSPI if GSS-API kex is in use.
+
+ :param str username: The username to authenticate as.
+ :returns:
+ a list of auth types permissible for the next stage of
+ authentication (normally empty)
+ :raises: `.BadAuthenticationType` --
+ if GSS-API Key Exchange was not performed (and no event was passed
+ in)
+ :raises: `.AuthenticationException` --
+ if the authentication failed (and no event was passed in)
+ :raises: `.SSHException` -- if there was a network error
+ """
+ if (not self.active) or (not self.initial_kex_done):
+ # we should never try to authenticate unless we're on a secure link
+ raise SSHException("No existing session")
+ my_event = threading.Event()
+ self.auth_handler = AuthHandler(self)
+ self.auth_handler.auth_gssapi_keyex(username, my_event)
+ return self.auth_handler.wait_for_response(my_event)
+
+ def set_log_channel(self, name):
+ """
+ Set the channel for this transport's logging. The default is
+ ``"paramiko.transport"`` but it can be set to anything you want. (See
+ the `.logging` module for more info.) SSH Channels will log to a
+ sub-channel of the one specified.
+
+ :param str name: new channel name for logging
+
+ .. versionadded:: 1.1
+ """
+ self.log_name = name
+ self.logger = util.get_logger(name)
+ self.packetizer.set_log(self.logger)
+
+ def get_log_channel(self):
+ """
+ Return the channel name used for this transport's logging.
+
+ :return: channel name as a `str`
+
+ .. versionadded:: 1.2
+ """
+ return self.log_name
+
+ def set_hexdump(self, hexdump):
+ """
+ Turn on/off logging a hex dump of protocol traffic at DEBUG level in
+ the logs. Normally you would want this off (which is the default),
+ but if you are debugging something, it may be useful.
+
+ :param bool hexdump:
+ ``True`` to log protocol traffic (in hex) to the log; ``False``
+ otherwise.
+ """
+ self.packetizer.set_hexdump(hexdump)
+
+ def get_hexdump(self):
+ """
+ Return ``True`` if the transport is currently logging hex dumps of
+ protocol traffic.
+
+ :return: ``True`` if hex dumps are being logged, else ``False``.
+
+ .. versionadded:: 1.4
+ """
+ return self.packetizer.get_hexdump()
+
+ def use_compression(self, compress=True):
+ """
+ Turn on/off compression. This will only have an effect before starting
+ the transport (ie before calling `connect`, etc). By default,
+ compression is off since it negatively affects interactive sessions.
+
+ :param bool compress:
+ ``True`` to ask the remote client/server to compress traffic;
+ ``False`` to refuse compression
+
+ .. versionadded:: 1.5.2
+ """
+ if compress:
+ self._preferred_compression = ("zlib@openssh.com", "zlib", "none")
+ else:
+ self._preferred_compression = ("none",)
+
+ def getpeername(self):
+ """
+ Return the address of the remote side of this Transport, if possible.
+
+ This is effectively a wrapper around ``getpeername`` on the underlying
+ socket. If the socket-like object has no ``getpeername`` method, then
+ ``("unknown", 0)`` is returned.
+
+ :return:
+ the address of the remote host, if known, as a ``(str, int)``
+ tuple.
+ """
+ gp = getattr(self.sock, "getpeername", None)
+ if gp is None:
+ return "unknown", 0
+ return gp()
+
+ def stop_thread(self):
+ self.active = False
+ self.packetizer.close()
+ # Keep trying to join() our main thread, quickly, until:
+ # * We join()ed successfully (self.is_alive() == False)
+ # * Or it looks like we've hit issue #520 (socket.recv hitting some
+ # race condition preventing it from timing out correctly), wherein
+ # our socket and packetizer are both closed (but where we'd
+ # otherwise be sitting forever on that recv()).
+ while (
+ self.is_alive()
+ and self is not threading.current_thread()
+ and not self.sock._closed
+ and not self.packetizer.closed
+ ):
+ self.join(0.1)
+
+ # internals...
+
+ # TODO 4.0: make a public alias for this because multiple other classes
+ # already explicitly rely on it...or just rewrite logging :D
+ def _log(self, level, msg, *args):
+ if issubclass(type(msg), list):
+ for m in msg:
+ self.logger.log(level, m)
+ else:
+ self.logger.log(level, msg, *args)
+
+ def _get_modulus_pack(self):
+ """used by KexGex to find primes for group exchange"""
+ return self._modulus_pack
+
+ def _next_channel(self):
+ """you are holding the lock"""
+ chanid = self._channel_counter
+ while self._channels.get(chanid) is not None:
+ self._channel_counter = (self._channel_counter + 1) & 0xFFFFFF
+ chanid = self._channel_counter
+ self._channel_counter = (self._channel_counter + 1) & 0xFFFFFF
+ return chanid
+
+ def _unlink_channel(self, chanid):
+ """used by a Channel to remove itself from the active channel list"""
+ self._channels.delete(chanid)
+
+ def _send_message(self, data):
+ self.packetizer.send_message(data)
+
+ def _send_user_message(self, data):
+ """
+ send a message, but block if we're in key negotiation. this is used
+ for user-initiated requests.
+ """
+ start = time.time()
+ while True:
+ self.clear_to_send.wait(0.1)
+ if not self.active:
+ self._log(
+ DEBUG, "Dropping user packet because connection is dead."
+ ) # noqa
+ return
+ self.clear_to_send_lock.acquire()
+ if self.clear_to_send.is_set():
+ break
+ self.clear_to_send_lock.release()
+ if time.time() > start + self.clear_to_send_timeout:
+ raise SSHException(
+ "Key-exchange timed out waiting for key negotiation"
+ ) # noqa
+ try:
+ self._send_message(data)
+ finally:
+ self.clear_to_send_lock.release()
+
+ def _set_K_H(self, k, h):
+ """
+ Used by a kex obj to set the K (root key) and H (exchange hash).
+ """
+ self.K = k
+ self.H = h
+ if self.session_id is None:
+ self.session_id = h
+
+ def _expect_packet(self, *ptypes):
+ """
+ Used by a kex obj to register the next packet type it expects to see.
+ """
+ self._expected_packet = tuple(ptypes)
+
+ def _verify_key(self, host_key, sig):
+ key = self._key_info[self.host_key_type](Message(host_key))
+ if key is None:
+ raise SSHException("Unknown host key type")
+ if not key.verify_ssh_sig(self.H, Message(sig)):
+ raise SSHException(
+ "Signature verification ({}) failed.".format(
+ self.host_key_type
+ )
+ ) # noqa
+ self.host_key = key
+
+ def _compute_key(self, id, nbytes):
+ """id is 'A' - 'F' for the various keys used by ssh"""
+ m = Message()
+ m.add_mpint(self.K)
+ m.add_bytes(self.H)
+ m.add_byte(b(id))
+ m.add_bytes(self.session_id)
+ # Fall back to SHA-1 for kex engines that fail to specify a hash
+ # algorithm, or for e.g. transport tests that don't run kexinit.
+ hash_algo = getattr(self.kex_engine, "hash_algo", None)
+ hash_select_msg = "kex engine {} specified hash_algo {!r}".format(
+ self.kex_engine.__class__.__name__, hash_algo
+ )
+ if hash_algo is None:
+ hash_algo = sha1
+ hash_select_msg += ", falling back to sha1"
+ if not hasattr(self, "_logged_hash_selection"):
+ self._log(DEBUG, hash_select_msg)
+ setattr(self, "_logged_hash_selection", True)
+ out = sofar = hash_algo(m.asbytes()).digest()
+ while len(out) < nbytes:
+ m = Message()
+ m.add_mpint(self.K)
+ m.add_bytes(self.H)
+ m.add_bytes(sofar)
+ digest = hash_algo(m.asbytes()).digest()
+ out += digest
+ sofar += digest
+ return out[:nbytes]
+
+ def _get_cipher(self, name, key, iv, operation):
+ if name not in self._cipher_info:
+ raise SSHException("Unknown client cipher " + name)
+ else:
+ cipher = Cipher(
+ self._cipher_info[name]["class"](key),
+ self._cipher_info[name]["mode"](iv),
+ backend=default_backend(),
+ )
+ if operation is self._ENCRYPT:
+ return cipher.encryptor()
+ else:
+ return cipher.decryptor()
+
+ def _set_forward_agent_handler(self, handler):
+ if handler is None:
+
+ def default_handler(channel):
+ self._queue_incoming_channel(channel)
+
+ self._forward_agent_handler = default_handler
+ else:
+ self._forward_agent_handler = handler
+
+ def _set_x11_handler(self, handler):
+ # only called if a channel has turned on x11 forwarding
+ if handler is None:
+ # by default, use the same mechanism as accept()
+ def default_handler(channel, src_addr_port):
+ self._queue_incoming_channel(channel)
+
+ self._x11_handler = default_handler
+ else:
+ self._x11_handler = handler
+
+ def _queue_incoming_channel(self, channel):
+ self.lock.acquire()
+ try:
+ self.server_accepts.append(channel)
+ self.server_accept_cv.notify()
+ finally:
+ self.lock.release()
+
+ def _sanitize_window_size(self, window_size):
+ if window_size is None:
+ window_size = self.default_window_size
+ return clamp_value(MIN_WINDOW_SIZE, window_size, MAX_WINDOW_SIZE)
+
+ def _sanitize_packet_size(self, max_packet_size):
+ if max_packet_size is None:
+ max_packet_size = self.default_max_packet_size
+ return clamp_value(MIN_PACKET_SIZE, max_packet_size, MAX_WINDOW_SIZE)
+
+ def _ensure_authed(self, ptype, message):
+ """
+ Checks message type against current auth state.
+
+ If server mode, and auth has not succeeded, and the message is of a
+ post-auth type (channel open or global request) an appropriate error
+ response Message is crafted and returned to caller for sending.
+
+ Otherwise (client mode, authed, or pre-auth message) returns None.
+ """
+ if (
+ not self.server_mode
+ or ptype <= HIGHEST_USERAUTH_MESSAGE_ID
+ or self.is_authenticated()
+ ):
+ return None
+ # WELP. We must be dealing with someone trying to do non-auth things
+ # without being authed. Tell them off, based on message class.
+ reply = Message()
+ # Global requests have no details, just failure.
+ if ptype == MSG_GLOBAL_REQUEST:
+ reply.add_byte(cMSG_REQUEST_FAILURE)
+ # Channel opens let us reject w/ a specific type + message.
+ elif ptype == MSG_CHANNEL_OPEN:
+ kind = message.get_text() # noqa
+ chanid = message.get_int()
+ reply.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
+ reply.add_int(chanid)
+ reply.add_int(OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED)
+ reply.add_string("")
+ reply.add_string("en")
+ # NOTE: Post-open channel messages do not need checking; the above will
+ # reject attempts to open channels, meaning that even if a malicious
+ # user tries to send a MSG_CHANNEL_REQUEST, it will simply fall under
+ # the logic that handles unknown channel IDs (as the channel list will
+ # be empty.)
+ return reply
+
+ def _enforce_strict_kex(self, ptype):
+ """
+ Conditionally raise `MessageOrderError` during strict initial kex.
+
+ This method should only be called inside code that handles non-KEXINIT
+ messages; it does not interrogate ``ptype`` besides using it to log
+ more accurately.
+ """
+ if self.agreed_on_strict_kex and not self.initial_kex_done:
+ name = MSG_NAMES.get(ptype, f"msg {ptype}")
+ raise MessageOrderError(
+ f"In strict-kex mode, but was sent {name!r}!"
+ )
+
+ def run(self):
+ # (use the exposed "run" method, because if we specify a thread target
+ # of a private method, threading.Thread will keep a reference to it
+ # indefinitely, creating a GC cycle and not letting Transport ever be
+ # GC'd. it's a bug in Thread.)
+
+ # Hold reference to 'sys' so we can test sys.modules to detect
+ # interpreter shutdown.
+ self.sys = sys
+
+ # active=True occurs before the thread is launched, to avoid a race
+ _active_threads.append(self)
+ tid = hex(id(self) & xffffffff)
+ if self.server_mode:
+ self._log(DEBUG, "starting thread (server mode): {}".format(tid))
+ else:
+ self._log(DEBUG, "starting thread (client mode): {}".format(tid))
+ try:
+ try:
+ self.packetizer.write_all(b(self.local_version + "\r\n"))
+ self._log(
+ DEBUG,
+ "Local version/idstring: {}".format(self.local_version),
+ ) # noqa
+ self._check_banner()
+ # The above is actually very much part of the handshake, but
+ # sometimes the banner can be read but the machine is not
+ # responding, for example when the remote ssh daemon is loaded
+ # into memory but we cannot read from the disk/spawn a new
+ # shell.
+ # Make sure we can specify a timeout for the initial handshake.
+ # Re-use the banner timeout for now.
+ self.packetizer.start_handshake(self.handshake_timeout)
+ self._send_kex_init()
+ self._expect_packet(MSG_KEXINIT)
+
+ while self.active:
+ if self.packetizer.need_rekey() and not self.in_kex:
+ self._send_kex_init()
+ try:
+ ptype, m = self.packetizer.read_message()
+ except NeedRekeyException:
+ continue
+ if ptype == MSG_IGNORE:
+ self._enforce_strict_kex(ptype)
+ continue
+ elif ptype == MSG_DISCONNECT:
+ self._parse_disconnect(m)
+ break
+ elif ptype == MSG_DEBUG:
+ self._enforce_strict_kex(ptype)
+ self._parse_debug(m)
+ continue
+ if len(self._expected_packet) > 0:
+ if ptype not in self._expected_packet:
+ exc_class = SSHException
+ if self.agreed_on_strict_kex:
+ exc_class = MessageOrderError
+ raise exc_class(
+ "Expecting packet from {!r}, got {:d}".format(
+ self._expected_packet, ptype
+ )
+ ) # noqa
+ self._expected_packet = tuple()
+ # These message IDs indicate key exchange & will differ
+ # depending on exact exchange algorithm
+ if (ptype >= 30) and (ptype <= 41):
+ self.kex_engine.parse_next(ptype, m)
+ continue
+
+ if ptype in self._handler_table:
+ error_msg = self._ensure_authed(ptype, m)
+ if error_msg:
+ self._send_message(error_msg)
+ else:
+ self._handler_table[ptype](m)
+ elif ptype in self._channel_handler_table:
+ chanid = m.get_int()
+ chan = self._channels.get(chanid)
+ if chan is not None:
+ self._channel_handler_table[ptype](chan, m)
+ elif chanid in self.channels_seen:
+ self._log(
+ DEBUG,
+ "Ignoring message for dead channel {:d}".format( # noqa
+ chanid
+ ),
+ )
+ else:
+ self._log(
+ ERROR,
+ "Channel request for unknown channel {:d}".format( # noqa
+ chanid
+ ),
+ )
+ break
+ elif (
+ self.auth_handler is not None
+ and ptype in self.auth_handler._handler_table
+ ):
+ handler = self.auth_handler._handler_table[ptype]
+ handler(m)
+ if len(self._expected_packet) > 0:
+ continue
+ else:
+ # Respond with "I don't implement this particular
+ # message type" message (unless the message type was
+ # itself literally MSG_UNIMPLEMENTED, in which case, we
+ # just shut up to avoid causing a useless loop).
+ name = MSG_NAMES[ptype]
+ warning = "Oops, unhandled type {} ({!r})".format(
+ ptype, name
+ )
+ self._log(WARNING, warning)
+ if ptype != MSG_UNIMPLEMENTED:
+ msg = Message()
+ msg.add_byte(cMSG_UNIMPLEMENTED)
+ msg.add_int(m.seqno)
+ self._send_message(msg)
+ self.packetizer.complete_handshake()
+ except SSHException as e:
+ self._log(
+ ERROR,
+ "Exception ({}): {}".format(
+ "server" if self.server_mode else "client", e
+ ),
+ )
+ self._log(ERROR, util.tb_strings())
+ self.saved_exception = e
+ except EOFError as e:
+ self._log(DEBUG, "EOF in transport thread")
+ self.saved_exception = e
+ except socket.error as e:
+ if type(e.args) is tuple:
+ if e.args:
+ emsg = "{} ({:d})".format(e.args[1], e.args[0])
+ else: # empty tuple, e.g. socket.timeout
+ emsg = str(e) or repr(e)
+ else:
+ emsg = e.args
+ self._log(ERROR, "Socket exception: " + emsg)
+ self.saved_exception = e
+ except Exception as e:
+ self._log(ERROR, "Unknown exception: " + str(e))
+ self._log(ERROR, util.tb_strings())
+ self.saved_exception = e
+ _active_threads.remove(self)
+ for chan in list(self._channels.values()):
+ chan._unlink()
+ if self.active:
+ self.active = False
+ self.packetizer.close()
+ if self.completion_event is not None:
+ self.completion_event.set()
+ if self.auth_handler is not None:
+ self.auth_handler.abort()
+ for event in self.channel_events.values():
+ event.set()
+ try:
+ self.lock.acquire()
+ self.server_accept_cv.notify()
+ finally:
+ self.lock.release()
+ self.sock.close()
+ except:
+ # Don't raise spurious 'NoneType has no attribute X' errors when we
+ # wake up during interpreter shutdown. Or rather -- raise
+ # everything *if* sys.modules (used as a convenient sentinel)
+ # appears to still exist.
+ if self.sys.modules is not None:
+ raise
+
+ def _log_agreement(self, which, local, remote):
+ # Log useful, non-duplicative line re: an agreed-upon algorithm.
+ # Old code implied algorithms could be asymmetrical (different for
+ # inbound vs outbound) so we preserve that possibility.
+ msg = "{}: ".format(which)
+ if local == remote:
+ msg += local
+ else:
+ msg += "local={}, remote={}".format(local, remote)
+ self._log(DEBUG, msg)
+
+ # protocol stages
+
+ def _negotiate_keys(self, m):
+ # throws SSHException on anything unusual
+ self.clear_to_send_lock.acquire()
+ try:
+ self.clear_to_send.clear()
+ finally:
+ self.clear_to_send_lock.release()
+ if self.local_kex_init is None:
+ # remote side wants to renegotiate
+ self._send_kex_init()
+ self._parse_kex_init(m)
+ self.kex_engine.start_kex()
+
+ def _check_banner(self):
+ # this is slow, but we only have to do it once
+ for i in range(100):
+ # give them 15 seconds for the first line, then just 2 seconds
+ # each additional line. (some sites have very high latency.)
+ if i == 0:
+ timeout = self.banner_timeout
+ else:
+ timeout = 2
+ try:
+ buf = self.packetizer.readline(timeout)
+ except ProxyCommandFailure:
+ raise
+ except Exception as e:
+ raise SSHException(
+ "Error reading SSH protocol banner" + str(e)
+ )
+ if buf[:4] == "SSH-":
+ break
+ self._log(DEBUG, "Banner: " + buf)
+ if buf[:4] != "SSH-":
+ raise SSHException('Indecipherable protocol version "' + buf + '"')
+ # save this server version string for later
+ self.remote_version = buf
+ self._log(DEBUG, "Remote version/idstring: {}".format(buf))
+ # pull off any attached comment
+ # NOTE: comment used to be stored in a variable and then...never used.
+ # since 2003. ca 877cd974b8182d26fa76d566072917ea67b64e67
+ i = buf.find(" ")
+ if i >= 0:
+ buf = buf[:i]
+ # parse out version string and make sure it matches
+ segs = buf.split("-", 2)
+ if len(segs) < 3:
+ raise SSHException("Invalid SSH banner")
+ version = segs[1]
+ client = segs[2]
+ if version != "1.99" and version != "2.0":
+ msg = "Incompatible version ({} instead of 2.0)"
+ raise IncompatiblePeer(msg.format(version))
+ msg = "Connected (version {}, client {})".format(version, client)
+ self._log(INFO, msg)
+
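A minimal standalone sketch of the banner parsing above: drop any attached comment, split the "SSH-protoversion-softwareversion" prefix on "-" at most twice, and accept only protocol 2.0 (or the 1.99 compatibility marker). The helper name below is illustrative, not paramiko API.

    # Illustrative re-implementation of the parsing done in _check_banner().
    def parse_banner(line):
        line = line.split(" ")[0]        # strip any attached comment
        segs = line.split("-", 2)        # "SSH", protoversion, softwareversion
        if len(segs) < 3 or segs[0] != "SSH":
            raise ValueError("Invalid SSH banner: " + line)
        version, client = segs[1], segs[2]
        if version not in ("1.99", "2.0"):
            raise ValueError("Incompatible version: " + version)
        return version, client

    assert parse_banner("SSH-2.0-OpenSSH_9.3 Debian-1") == ("2.0", "OpenSSH_9.3")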
+ def _send_kex_init(self):
+ """
+ announce to the other side that we'd like to negotiate keys, and what
+ kind of key negotiation we support.
+ """
+ self.clear_to_send_lock.acquire()
+ try:
+ self.clear_to_send.clear()
+ finally:
+ self.clear_to_send_lock.release()
+ self.gss_kex_used = False
+ self.in_kex = True
+ kex_algos = list(self.preferred_kex)
+ if self.server_mode:
+ mp_required_prefix = "diffie-hellman-group-exchange-sha"
+ kex_mp = [k for k in kex_algos if k.startswith(mp_required_prefix)]
+ if (self._modulus_pack is None) and (len(kex_mp) > 0):
+ # can't do group-exchange if we don't have a pack of potential
+ # primes
+ pkex = [
+ k
+ for k in self.get_security_options().kex
+ if not k.startswith(mp_required_prefix)
+ ]
+ self.get_security_options().kex = pkex
+ available_server_keys = list(
+ filter(
+ list(self.server_key_dict.keys()).__contains__,
+ # TODO: ensure tests will catch if somebody streamlines
+ # this by mistake - case is the admittedly silly one where
+ # the only calls to add_server_key() contain keys which
+ # were filtered out of the below via disabled_algorithms.
+ # If this is streamlined, we would then be allowing the
+ # disabled algorithm(s) for hostkey use
+ # TODO: honestly this prob just wants to get thrown out
+ # when we make kex configuration more straightforward
+ self.preferred_keys,
+ )
+ )
+ else:
+ available_server_keys = self.preferred_keys
+ # Signal support for MSG_EXT_INFO so server will send it to us.
+ # NOTE: doing this here handily means we don't even consider this
+ # value when agreeing on real kex algo to use (which is a common
+ # pitfall when adding this apparently).
+ kex_algos.append("ext-info-c")
+
+ # Similar to ext-info, but used in both server modes, so done outside
+ # of above if/else.
+ if self.advertise_strict_kex:
+ which = "s" if self.server_mode else "c"
+ kex_algos.append(f"kex-strict-{which}-v00@openssh.com")
+
+ m = Message()
+ m.add_byte(cMSG_KEXINIT)
+ m.add_bytes(os.urandom(16))
+ m.add_list(kex_algos)
+ m.add_list(available_server_keys)
+ m.add_list(self.preferred_ciphers)
+ m.add_list(self.preferred_ciphers)
+ m.add_list(self.preferred_macs)
+ m.add_list(self.preferred_macs)
+ m.add_list(self.preferred_compression)
+ m.add_list(self.preferred_compression)
+ m.add_string(bytes())
+ m.add_string(bytes())
+ m.add_boolean(False)
+ m.add_int(0)
+ # save a copy for later (needed to compute a hash)
+ self.local_kex_init = self._latest_kex_init = m.asbytes()
+ self._send_message(m)
+
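The pairs of duplicated add_list() calls above are not a mistake: RFC 4253 section 7.1 defines separate client-to-server and server-to-client name-lists for ciphers, MACs, and compression. A rough sketch of the resulting wire layout, using only the standard library (the algorithm names in the lists are arbitrary examples):

    import os
    import struct

    # A name-list on the wire is a uint32 length plus comma-separated names,
    # which is what Message.add_list() produces.
    def name_list(names):
        data = ",".join(names).encode("ascii")
        return struct.pack(">I", len(data)) + data

    kexinit_payload = (
        bytes([20])                                        # SSH_MSG_KEXINIT
        + os.urandom(16)                                   # cookie
        + name_list(["curve25519-sha256", "ext-info-c"])   # kex algorithms
        + name_list(["ssh-ed25519"])                       # host key algorithms
        + name_list(["aes128-ctr"]) * 2                    # ciphers c->s, s->c
        + name_list(["hmac-sha2-256"]) * 2                 # MACs c->s, s->c
        + name_list(["none"]) * 2                          # compression c->s, s->c
        + name_list([]) * 2                                # languages c->s, s->c
        + b"\x00"                                          # first_kex_packet_follows
        + struct.pack(">I", 0)                             # reserved
    )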
+ def _really_parse_kex_init(self, m, ignore_first_byte=False):
+ parsed = {}
+ if ignore_first_byte:
+ m.get_byte()
+ m.get_bytes(16) # cookie, discarded
+ parsed["kex_algo_list"] = m.get_list()
+ parsed["server_key_algo_list"] = m.get_list()
+ parsed["client_encrypt_algo_list"] = m.get_list()
+ parsed["server_encrypt_algo_list"] = m.get_list()
+ parsed["client_mac_algo_list"] = m.get_list()
+ parsed["server_mac_algo_list"] = m.get_list()
+ parsed["client_compress_algo_list"] = m.get_list()
+ parsed["server_compress_algo_list"] = m.get_list()
+ parsed["client_lang_list"] = m.get_list()
+ parsed["server_lang_list"] = m.get_list()
+ parsed["kex_follows"] = m.get_boolean()
+ m.get_int() # unused
+ return parsed
+
+ def _get_latest_kex_init(self):
+ return self._really_parse_kex_init(
+ Message(self._latest_kex_init),
+ ignore_first_byte=True,
+ )
+
+ def _parse_kex_init(self, m):
+ parsed = self._really_parse_kex_init(m)
+ kex_algo_list = parsed["kex_algo_list"]
+ server_key_algo_list = parsed["server_key_algo_list"]
+ client_encrypt_algo_list = parsed["client_encrypt_algo_list"]
+ server_encrypt_algo_list = parsed["server_encrypt_algo_list"]
+ client_mac_algo_list = parsed["client_mac_algo_list"]
+ server_mac_algo_list = parsed["server_mac_algo_list"]
+ client_compress_algo_list = parsed["client_compress_algo_list"]
+ server_compress_algo_list = parsed["server_compress_algo_list"]
+ client_lang_list = parsed["client_lang_list"]
+ server_lang_list = parsed["server_lang_list"]
+ kex_follows = parsed["kex_follows"]
+
+ self._log(DEBUG, "=== Key exchange possibilities ===")
+ for prefix, value in (
+ ("kex algos", kex_algo_list),
+ ("server key", server_key_algo_list),
+ # TODO: shouldn't these two lines say "cipher" to match usual
+ # terminology (including elsewhere in paramiko!)?
+ ("client encrypt", client_encrypt_algo_list),
+ ("server encrypt", server_encrypt_algo_list),
+ ("client mac", client_mac_algo_list),
+ ("server mac", server_mac_algo_list),
+ ("client compress", client_compress_algo_list),
+ ("server compress", server_compress_algo_list),
+ ("client lang", client_lang_list),
+ ("server lang", server_lang_list),
+ ):
+ if value == [""]:
+ value = ["<none>"]
+ value = ", ".join(value)
+ self._log(DEBUG, "{}: {}".format(prefix, value))
+ self._log(DEBUG, "kex follows: {}".format(kex_follows))
+ self._log(DEBUG, "=== Key exchange agreements ===")
+
+ # Record, and strip out, ext-info and/or strict-kex non-algorithms
+ self._remote_ext_info = None
+ self._remote_strict_kex = None
+ to_pop = []
+ for i, algo in enumerate(kex_algo_list):
+ if algo.startswith("ext-info-"):
+ self._remote_ext_info = algo
+ to_pop.insert(0, i)
+ elif algo.startswith("kex-strict-"):
+ # NOTE: this is what we are expecting from the /remote/ end.
+ which = "c" if self.server_mode else "s"
+ expected = f"kex-strict-{which}-v00@openssh.com"
+ # Set strict mode if agreed.
+ self.agreed_on_strict_kex = (
+ algo == expected and self.advertise_strict_kex
+ )
+ self._log(
+ DEBUG, f"Strict kex mode: {self.agreed_on_strict_kex}"
+ )
+ to_pop.insert(0, i)
+ for i in to_pop:
+ kex_algo_list.pop(i)
+
+ # CVE mitigation: expect zeroed-out seqno anytime we are performing kex
+ # init phase, if strict mode was negotiated.
+ if (
+ self.agreed_on_strict_kex
+ and not self.initial_kex_done
+ and m.seqno != 0
+ ):
+ raise MessageOrderError(
+ "In strict-kex mode, but KEXINIT was not the first packet!"
+ )
+
+ # as a server, we pick the first item in the client's list that we
+ # support.
+ # as a client, we pick the first item in our list that the server
+ # supports.
+ if self.server_mode:
+ agreed_kex = list(
+ filter(self.preferred_kex.__contains__, kex_algo_list)
+ )
+ else:
+ agreed_kex = list(
+ filter(kex_algo_list.__contains__, self.preferred_kex)
+ )
+ if len(agreed_kex) == 0:
+ # TODO: do an auth-overhaul style aggregate exception here?
+ # TODO: would let us streamline log output & show all failures up
+ # front
+ raise IncompatiblePeer(
+ "Incompatible ssh peer (no acceptable kex algorithm)"
+ ) # noqa
+ self.kex_engine = self._kex_info[agreed_kex[0]](self)
+ self._log(DEBUG, "Kex: {}".format(agreed_kex[0]))
+
+ if self.server_mode:
+ available_server_keys = list(
+ filter(
+ list(self.server_key_dict.keys()).__contains__,
+ self.preferred_keys,
+ )
+ )
+ agreed_keys = list(
+ filter(
+ available_server_keys.__contains__, server_key_algo_list
+ )
+ )
+ else:
+ agreed_keys = list(
+ filter(server_key_algo_list.__contains__, self.preferred_keys)
+ )
+ if len(agreed_keys) == 0:
+ raise IncompatiblePeer(
+ "Incompatible ssh peer (no acceptable host key)"
+ ) # noqa
+ self.host_key_type = agreed_keys[0]
+ if self.server_mode and (self.get_server_key() is None):
+ raise IncompatiblePeer(
+ "Incompatible ssh peer (can't match requested host key type)"
+ ) # noqa
+ self._log_agreement("HostKey", agreed_keys[0], agreed_keys[0])
+
+ if self.server_mode:
+ agreed_local_ciphers = list(
+ filter(
+ self.preferred_ciphers.__contains__,
+ server_encrypt_algo_list,
+ )
+ )
+ agreed_remote_ciphers = list(
+ filter(
+ self.preferred_ciphers.__contains__,
+ client_encrypt_algo_list,
+ )
+ )
+ else:
+ agreed_local_ciphers = list(
+ filter(
+ client_encrypt_algo_list.__contains__,
+ self.preferred_ciphers,
+ )
+ )
+ agreed_remote_ciphers = list(
+ filter(
+ server_encrypt_algo_list.__contains__,
+ self.preferred_ciphers,
+ )
+ )
+ if len(agreed_local_ciphers) == 0 or len(agreed_remote_ciphers) == 0:
+ raise IncompatiblePeer(
+ "Incompatible ssh server (no acceptable ciphers)"
+ ) # noqa
+ self.local_cipher = agreed_local_ciphers[0]
+ self.remote_cipher = agreed_remote_ciphers[0]
+ self._log_agreement(
+ "Cipher", local=self.local_cipher, remote=self.remote_cipher
+ )
+
+ if self.server_mode:
+ agreed_remote_macs = list(
+ filter(self.preferred_macs.__contains__, client_mac_algo_list)
+ )
+ agreed_local_macs = list(
+ filter(self.preferred_macs.__contains__, server_mac_algo_list)
+ )
+ else:
+ agreed_local_macs = list(
+ filter(client_mac_algo_list.__contains__, self.preferred_macs)
+ )
+ agreed_remote_macs = list(
+ filter(server_mac_algo_list.__contains__, self.preferred_macs)
+ )
+ if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
+ raise IncompatiblePeer(
+ "Incompatible ssh server (no acceptable macs)"
+ )
+ self.local_mac = agreed_local_macs[0]
+ self.remote_mac = agreed_remote_macs[0]
+ self._log_agreement(
+ "MAC", local=self.local_mac, remote=self.remote_mac
+ )
+
+ if self.server_mode:
+ agreed_remote_compression = list(
+ filter(
+ self.preferred_compression.__contains__,
+ client_compress_algo_list,
+ )
+ )
+ agreed_local_compression = list(
+ filter(
+ self.preferred_compression.__contains__,
+ server_compress_algo_list,
+ )
+ )
+ else:
+ agreed_local_compression = list(
+ filter(
+ client_compress_algo_list.__contains__,
+ self.preferred_compression,
+ )
+ )
+ agreed_remote_compression = list(
+ filter(
+ server_compress_algo_list.__contains__,
+ self.preferred_compression,
+ )
+ )
+ if (
+ len(agreed_local_compression) == 0
+ or len(agreed_remote_compression) == 0
+ ):
+ msg = "Incompatible ssh server (no acceptable compression)"
+ msg += " {!r} {!r} {!r}"
+ raise IncompatiblePeer(
+ msg.format(
+ agreed_local_compression,
+ agreed_remote_compression,
+ self.preferred_compression,
+ )
+ )
+ self.local_compression = agreed_local_compression[0]
+ self.remote_compression = agreed_remote_compression[0]
+ self._log_agreement(
+ "Compression",
+ local=self.local_compression,
+ remote=self.remote_compression,
+ )
+ self._log(DEBUG, "=== End of kex handshake ===")
+
+ # save for computing hash later...
+ # now wait! openssh has a bug (and others might too) where there are
+ # actually some extra bytes (one NUL byte in openssh's case) added to
+ # the end of the packet but not parsed. turns out we need to throw
+ # away those bytes because they aren't part of the hash.
+ self.remote_kex_init = cMSG_KEXINIT + m.get_so_far()
+
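Every agreement block above applies the same first-match rule: as a server, take the first client-offered name we also support; as a client, take the first of our own preferences that the peer offers. A standalone sketch with made-up lists:

    def agree(server_mode, ours, theirs):
        if server_mode:
            candidates = [a for a in theirs if a in ours]   # client's order wins
        else:
            candidates = [a for a in ours if a in theirs]   # our order wins
        if not candidates:
            raise ValueError("no acceptable algorithm")
        return candidates[0]

    ours = ["aes256-ctr", "aes128-ctr"]
    theirs = ["aes128-ctr", "aes256-ctr"]
    assert agree(server_mode=False, ours=ours, theirs=theirs) == "aes256-ctr"
    assert agree(server_mode=True, ours=ours, theirs=theirs) == "aes128-ctr"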
+ def _activate_inbound(self):
+ """switch on newly negotiated encryption parameters for
+ inbound traffic"""
+ block_size = self._cipher_info[self.remote_cipher]["block-size"]
+ if self.server_mode:
+ IV_in = self._compute_key("A", block_size)
+ key_in = self._compute_key(
+ "C", self._cipher_info[self.remote_cipher]["key-size"]
+ )
+ else:
+ IV_in = self._compute_key("B", block_size)
+ key_in = self._compute_key(
+ "D", self._cipher_info[self.remote_cipher]["key-size"]
+ )
+ engine = self._get_cipher(
+ self.remote_cipher, key_in, IV_in, self._DECRYPT
+ )
+ etm = "etm@openssh.com" in self.remote_mac
+ mac_size = self._mac_info[self.remote_mac]["size"]
+ mac_engine = self._mac_info[self.remote_mac]["class"]
+ # initial mac keys are done in the hash's natural size (not the
+ # potentially truncated transmission size)
+ if self.server_mode:
+ mac_key = self._compute_key("E", mac_engine().digest_size)
+ else:
+ mac_key = self._compute_key("F", mac_engine().digest_size)
+ self.packetizer.set_inbound_cipher(
+ engine, block_size, mac_engine, mac_size, mac_key, etm=etm
+ )
+ compress_in = self._compression_info[self.remote_compression][1]
+ if compress_in is not None and (
+ self.remote_compression != "zlib@openssh.com" or self.authenticated
+ ):
+ self._log(DEBUG, "Switching on inbound compression ...")
+ self.packetizer.set_inbound_compressor(compress_in())
+ # Reset inbound sequence number if strict mode.
+ if self.agreed_on_strict_kex:
+ self._log(
+ DEBUG,
+ "Resetting inbound seqno after NEWKEYS due to strict mode",
+ )
+ self.packetizer.reset_seqno_in()
+
+ def _activate_outbound(self):
+ """switch on newly negotiated encryption parameters for
+ outbound traffic"""
+ m = Message()
+ m.add_byte(cMSG_NEWKEYS)
+ self._send_message(m)
+ # Reset outbound sequence number if strict mode.
+ if self.agreed_on_strict_kex:
+ self._log(
+ DEBUG,
+ "Resetting outbound seqno after NEWKEYS due to strict mode",
+ )
+ self.packetizer.reset_seqno_out()
+ block_size = self._cipher_info[self.local_cipher]["block-size"]
+ if self.server_mode:
+ IV_out = self._compute_key("B", block_size)
+ key_out = self._compute_key(
+ "D", self._cipher_info[self.local_cipher]["key-size"]
+ )
+ else:
+ IV_out = self._compute_key("A", block_size)
+ key_out = self._compute_key(
+ "C", self._cipher_info[self.local_cipher]["key-size"]
+ )
+ engine = self._get_cipher(
+ self.local_cipher, key_out, IV_out, self._ENCRYPT
+ )
+ etm = "etm@openssh.com" in self.local_mac
+ mac_size = self._mac_info[self.local_mac]["size"]
+ mac_engine = self._mac_info[self.local_mac]["class"]
+ # initial mac keys are done in the hash's natural size (not the
+ # potentially truncated transmission size)
+ if self.server_mode:
+ mac_key = self._compute_key("F", mac_engine().digest_size)
+ else:
+ mac_key = self._compute_key("E", mac_engine().digest_size)
+ sdctr = self.local_cipher.endswith("-ctr")
+ self.packetizer.set_outbound_cipher(
+ engine, block_size, mac_engine, mac_size, mac_key, sdctr, etm=etm
+ )
+ compress_out = self._compression_info[self.local_compression][0]
+ if compress_out is not None and (
+ self.local_compression != "zlib@openssh.com" or self.authenticated
+ ):
+ self._log(DEBUG, "Switching on outbound compression ...")
+ self.packetizer.set_outbound_compressor(compress_out())
+ if not self.packetizer.need_rekey():
+ self.in_kex = False
+ # If client indicated extension support, send that packet immediately
+ if (
+ self.server_mode
+ and self.server_sig_algs
+ and self._remote_ext_info == "ext-info-c"
+ ):
+ extensions = {"server-sig-algs": ",".join(self.preferred_pubkeys)}
+ m = Message()
+ m.add_byte(cMSG_EXT_INFO)
+ m.add_int(len(extensions))
+ for name, value in sorted(extensions.items()):
+ m.add_string(name)
+ m.add_string(value)
+ self._send_message(m)
+ # we always expect to receive NEWKEYS now
+ self._expect_packet(MSG_NEWKEYS)
+
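The single-letter identifiers passed to _compute_key() in the two methods above come from RFC 4253 section 7.2; the server-mode branches simply swap directions. For reference:

    # RFC 4253 section 7.2 key-derivation letters, as consumed by
    # _activate_inbound() / _activate_outbound() above.
    KEY_LETTERS = {
        "A": "initial IV, client to server",
        "B": "initial IV, server to client",
        "C": "encryption key, client to server",
        "D": "encryption key, server to client",
        "E": "integrity (MAC) key, client to server",
        "F": "integrity (MAC) key, server to client",
    }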
+ def _auth_trigger(self):
+ self.authenticated = True
+ # delayed initiation of compression
+ if self.local_compression == "zlib@openssh.com":
+ compress_out = self._compression_info[self.local_compression][0]
+ self._log(DEBUG, "Switching on outbound compression ...")
+ self.packetizer.set_outbound_compressor(compress_out())
+ if self.remote_compression == "zlib@openssh.com":
+ compress_in = self._compression_info[self.remote_compression][1]
+ self._log(DEBUG, "Switching on inbound compression ...")
+ self.packetizer.set_inbound_compressor(compress_in())
+
+ def _parse_ext_info(self, msg):
+ # Packet is a count followed by that many key-string to possibly-bytes
+ # pairs.
+ extensions = {}
+ for _ in range(msg.get_int()):
+ name = msg.get_text()
+ value = msg.get_string()
+ extensions[name] = value
+ self._log(DEBUG, "Got EXT_INFO: {}".format(extensions))
+ # NOTE: this should work ok in cases where a server sends /two/ such
+ # messages; the RFC explicitly states a 2nd one should overwrite the
+ # 1st.
+ self.server_extensions = extensions
+
+ def _parse_newkeys(self, m):
+ self._log(DEBUG, "Switch to new keys ...")
+ self._activate_inbound()
+ # can also free a bunch of stuff here
+ self.local_kex_init = self.remote_kex_init = None
+ self.K = None
+ self.kex_engine = None
+ if self.server_mode and (self.auth_handler is None):
+ # create auth handler for server mode
+ self.auth_handler = AuthHandler(self)
+ if not self.initial_kex_done:
+ # this was the first key exchange
+ # (also signal to packetizer as it sometimes wants to know this
+ # status as well, eg when seqnos rollover)
+ self.initial_kex_done = self.packetizer._initial_kex_done = True
+ # send an event?
+ if self.completion_event is not None:
+ self.completion_event.set()
+ # it's now okay to send data again (if this was a re-key)
+ if not self.packetizer.need_rekey():
+ self.in_kex = False
+ self.clear_to_send_lock.acquire()
+ try:
+ self.clear_to_send.set()
+ finally:
+ self.clear_to_send_lock.release()
+ return
+
+ def _parse_disconnect(self, m):
+ code = m.get_int()
+ desc = m.get_text()
+ self._log(INFO, "Disconnect (code {:d}): {}".format(code, desc))
+
+ def _parse_global_request(self, m):
+ kind = m.get_text()
+ self._log(DEBUG, 'Received global request "{}"'.format(kind))
+ want_reply = m.get_boolean()
+ if not self.server_mode:
+ self._log(
+ DEBUG,
+ 'Rejecting "{}" global request from server.'.format(kind),
+ )
+ ok = False
+ elif kind == "tcpip-forward":
+ address = m.get_text()
+ port = m.get_int()
+ ok = self.server_object.check_port_forward_request(address, port)
+ if ok:
+ ok = (ok,)
+ elif kind == "cancel-tcpip-forward":
+ address = m.get_text()
+ port = m.get_int()
+ self.server_object.cancel_port_forward_request(address, port)
+ ok = True
+ else:
+ ok = self.server_object.check_global_request(kind, m)
+ extra = ()
+ if type(ok) is tuple:
+ extra = ok
+ ok = True
+ if want_reply:
+ msg = Message()
+ if ok:
+ msg.add_byte(cMSG_REQUEST_SUCCESS)
+ msg.add(*extra)
+ else:
+ msg.add_byte(cMSG_REQUEST_FAILURE)
+ self._send_message(msg)
+
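The client-side counterpart of the "tcpip-forward" branch above is Transport.request_port_forward(), which emits that global request for a server to handle. A hedged usage sketch; the host name and credentials are placeholders:

    import paramiko

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect("ssh.example.com", username="user", password="secret")

    transport = client.get_transport()
    # Ask the server to listen on port 8022 and forward connections back to us.
    port = transport.request_port_forward("", 8022)
    print("server is forwarding port", port)
    transport.cancel_port_forward("", port)
    client.close()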
+ def _parse_request_success(self, m):
+ self._log(DEBUG, "Global request successful.")
+ self.global_response = m
+ if self.completion_event is not None:
+ self.completion_event.set()
+
+ def _parse_request_failure(self, m):
+ self._log(DEBUG, "Global request denied.")
+ self.global_response = None
+ if self.completion_event is not None:
+ self.completion_event.set()
+
+ def _parse_channel_open_success(self, m):
+ chanid = m.get_int()
+ server_chanid = m.get_int()
+ server_window_size = m.get_int()
+ server_max_packet_size = m.get_int()
+ chan = self._channels.get(chanid)
+ if chan is None:
+ self._log(WARNING, "Success for unrequested channel! [??]")
+ return
+ self.lock.acquire()
+ try:
+ chan._set_remote_channel(
+ server_chanid, server_window_size, server_max_packet_size
+ )
+ self._log(DEBUG, "Secsh channel {:d} opened.".format(chanid))
+ if chanid in self.channel_events:
+ self.channel_events[chanid].set()
+ del self.channel_events[chanid]
+ finally:
+ self.lock.release()
+ return
+
+ def _parse_channel_open_failure(self, m):
+ chanid = m.get_int()
+ reason = m.get_int()
+ reason_str = m.get_text()
+ m.get_text() # ignored language
+ reason_text = CONNECTION_FAILED_CODE.get(reason, "(unknown code)")
+ self._log(
+ ERROR,
+ "Secsh channel {:d} open FAILED: {}: {}".format(
+ chanid, reason_str, reason_text
+ ),
+ )
+ self.lock.acquire()
+ try:
+ self.saved_exception = ChannelException(reason, reason_text)
+ if chanid in self.channel_events:
+ self._channels.delete(chanid)
+ if chanid in self.channel_events:
+ self.channel_events[chanid].set()
+ del self.channel_events[chanid]
+ finally:
+ self.lock.release()
+ return
+
+ def _parse_channel_open(self, m):
+ kind = m.get_text()
+ chanid = m.get_int()
+ initial_window_size = m.get_int()
+ max_packet_size = m.get_int()
+ reject = False
+ if (
+ kind == "auth-agent@openssh.com"
+ and self._forward_agent_handler is not None
+ ):
+ self._log(DEBUG, "Incoming forward agent connection")
+ self.lock.acquire()
+ try:
+ my_chanid = self._next_channel()
+ finally:
+ self.lock.release()
+ elif (kind == "x11") and (self._x11_handler is not None):
+ origin_addr = m.get_text()
+ origin_port = m.get_int()
+ self._log(
+ DEBUG,
+ "Incoming x11 connection from {}:{:d}".format(
+ origin_addr, origin_port
+ ),
+ )
+ self.lock.acquire()
+ try:
+ my_chanid = self._next_channel()
+ finally:
+ self.lock.release()
+ elif (kind == "forwarded-tcpip") and (self._tcp_handler is not None):
+ server_addr = m.get_text()
+ server_port = m.get_int()
+ origin_addr = m.get_text()
+ origin_port = m.get_int()
+ self._log(
+ DEBUG,
+ "Incoming tcp forwarded connection from {}:{:d}".format(
+ origin_addr, origin_port
+ ),
+ )
+ self.lock.acquire()
+ try:
+ my_chanid = self._next_channel()
+ finally:
+ self.lock.release()
+ elif not self.server_mode:
+ self._log(
+ DEBUG,
+ 'Rejecting "{}" channel request from server.'.format(kind),
+ )
+ reject = True
+ reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
+ else:
+ self.lock.acquire()
+ try:
+ my_chanid = self._next_channel()
+ finally:
+ self.lock.release()
+ if kind == "direct-tcpip":
+ # handle direct-tcpip requests coming from the client
+ dest_addr = m.get_text()
+ dest_port = m.get_int()
+ origin_addr = m.get_text()
+ origin_port = m.get_int()
+ reason = self.server_object.check_channel_direct_tcpip_request(
+ my_chanid,
+ (origin_addr, origin_port),
+ (dest_addr, dest_port),
+ )
+ else:
+ reason = self.server_object.check_channel_request(
+ kind, my_chanid
+ )
+ if reason != OPEN_SUCCEEDED:
+ self._log(
+ DEBUG,
+ 'Rejecting "{}" channel request from client.'.format(kind),
+ )
+ reject = True
+ if reject:
+ msg = Message()
+ msg.add_byte(cMSG_CHANNEL_OPEN_FAILURE)
+ msg.add_int(chanid)
+ msg.add_int(reason)
+ msg.add_string("")
+ msg.add_string("en")
+ self._send_message(msg)
+ return
+
+ chan = Channel(my_chanid)
+ self.lock.acquire()
+ try:
+ self._channels.put(my_chanid, chan)
+ self.channels_seen[my_chanid] = True
+ chan._set_transport(self)
+ chan._set_window(
+ self.default_window_size, self.default_max_packet_size
+ )
+ chan._set_remote_channel(
+ chanid, initial_window_size, max_packet_size
+ )
+ finally:
+ self.lock.release()
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_OPEN_SUCCESS)
+ m.add_int(chanid)
+ m.add_int(my_chanid)
+ m.add_int(self.default_window_size)
+ m.add_int(self.default_max_packet_size)
+ self._send_message(m)
+ self._log(
+ DEBUG, "Secsh channel {:d} ({}) opened.".format(my_chanid, kind)
+ )
+ if kind == "auth-agent@openssh.com":
+ self._forward_agent_handler(chan)
+ elif kind == "x11":
+ self._x11_handler(chan, (origin_addr, origin_port))
+ elif kind == "forwarded-tcpip":
+ chan.origin_addr = (origin_addr, origin_port)
+ self._tcp_handler(
+ chan, (origin_addr, origin_port), (server_addr, server_port)
+ )
+ else:
+ self._queue_incoming_channel(chan)
+
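The client-side mirror of this handler is Transport.open_channel(); when Paramiko acts as the server, a "direct-tcpip" open like the one below is what lands in the branch above. A hedged sketch with placeholder addresses and credentials:

    import paramiko

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect("ssh.example.com", username="user", password="secret")

    transport = client.get_transport()
    # Tunnel a TCP connection to db.internal:5432 through the SSH server.
    chan = transport.open_channel(
        "direct-tcpip",
        dest_addr=("db.internal", 5432),
        src_addr=("127.0.0.1", 0),
    )
    chan.sendall(b"hello through the tunnel")
    chan.close()
    client.close()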
+ def _parse_debug(self, m):
+ m.get_boolean() # always_display
+ msg = m.get_string()
+ m.get_string() # language
+ self._log(DEBUG, "Debug msg: {}".format(util.safe_string(msg)))
+
+ def _get_subsystem_handler(self, name):
+ try:
+ self.lock.acquire()
+ if name not in self.subsystem_table:
+ return None, [], {}
+ return self.subsystem_table[name]
+ finally:
+ self.lock.release()
+
+ _channel_handler_table = {
+ MSG_CHANNEL_SUCCESS: Channel._request_success,
+ MSG_CHANNEL_FAILURE: Channel._request_failed,
+ MSG_CHANNEL_DATA: Channel._feed,
+ MSG_CHANNEL_EXTENDED_DATA: Channel._feed_extended,
+ MSG_CHANNEL_WINDOW_ADJUST: Channel._window_adjust,
+ MSG_CHANNEL_REQUEST: Channel._handle_request,
+ MSG_CHANNEL_EOF: Channel._handle_eof,
+ MSG_CHANNEL_CLOSE: Channel._handle_close,
+ }
+
+
+# TODO 4.0: drop this, we barely use it ourselves, it badly replicates the
+# Transport-internal algorithm management, AND does so in a way which doesn't
+# honor newer things like disabled_algorithms!
+class SecurityOptions:
+ """
+ Simple object containing the security preferences of an ssh transport.
+ These are tuples of acceptable ciphers, digests, key types, and key
+ exchange algorithms, listed in order of preference.
+
+ Changing the contents and/or order of these fields affects the underlying
+ `.Transport` (but only if you change them before starting the session).
+ If you try to add an algorithm that paramiko doesn't recognize,
+ ``ValueError`` will be raised. If you try to assign something besides a
+ tuple to one of the fields, ``TypeError`` will be raised.
+ """
+
+ __slots__ = "_transport"
+
+ def __init__(self, transport):
+ self._transport = transport
+
+ def __repr__(self):
+ """
+ Returns a string representation of this object, for debugging.
+ """
+ return "<paramiko.SecurityOptions for {!r}>".format(self._transport)
+
+ def _set(self, name, orig, x):
+ if type(x) is list:
+ x = tuple(x)
+ if type(x) is not tuple:
+ raise TypeError("expected tuple or list")
+ possible = list(getattr(self._transport, orig).keys())
+ forbidden = [n for n in x if n not in possible]
+ if len(forbidden) > 0:
+ raise ValueError("unknown cipher")
+ setattr(self._transport, name, x)
+
+ @property
+ def ciphers(self):
+ """Symmetric encryption ciphers"""
+ return self._transport._preferred_ciphers
+
+ @ciphers.setter
+ def ciphers(self, x):
+ self._set("_preferred_ciphers", "_cipher_info", x)
+
+ @property
+ def digests(self):
+ """Digest (one-way hash) algorithms"""
+ return self._transport._preferred_macs
+
+ @digests.setter
+ def digests(self, x):
+ self._set("_preferred_macs", "_mac_info", x)
+
+ @property
+ def key_types(self):
+ """Public-key algorithms"""
+ return self._transport._preferred_keys
+
+ @key_types.setter
+ def key_types(self, x):
+ self._set("_preferred_keys", "_key_info", x)
+
+ @property
+ def kex(self):
+ """Key exchange algorithms"""
+ return self._transport._preferred_kex
+
+ @kex.setter
+ def kex(self, x):
+ self._set("_preferred_kex", "_kex_info", x)
+
+ @property
+ def compression(self):
+ """Compression algorithms"""
+ return self._transport._preferred_compression
+
+ @compression.setter
+ def compression(self, x):
+ self._set("_preferred_compression", "_compression_info", x)
+
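Deprecation note aside, SecurityOptions is still the supported way to narrow or reorder algorithms before the handshake starts. A hedged sketch against a placeholder host; the algorithm names assume a default cryptography backend:

    import socket
    import paramiko

    sock = socket.create_connection(("ssh.example.com", 22))   # placeholder host
    t = paramiko.Transport(sock)

    opts = t.get_security_options()
    # Unknown names raise ValueError; non-tuple/list values raise TypeError.
    opts.kex = ("ecdh-sha2-nistp256",)
    opts.ciphers = ("aes256-ctr",)

    t.start_client()          # negotiation now uses only the narrowed lists
    print(t.get_security_options().kex)
    t.close()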
+
+class ChannelMap:
+ def __init__(self):
+ # (id -> Channel)
+ self._map = weakref.WeakValueDictionary()
+ self._lock = threading.Lock()
+
+ def put(self, chanid, chan):
+ self._lock.acquire()
+ try:
+ self._map[chanid] = chan
+ finally:
+ self._lock.release()
+
+ def get(self, chanid):
+ self._lock.acquire()
+ try:
+ return self._map.get(chanid, None)
+ finally:
+ self._lock.release()
+
+ def delete(self, chanid):
+ self._lock.acquire()
+ try:
+ try:
+ del self._map[chanid]
+ except KeyError:
+ pass
+ finally:
+ self._lock.release()
+
+ def values(self):
+ self._lock.acquire()
+ try:
+ return list(self._map.values())
+ finally:
+ self._lock.release()
+
+ def __len__(self):
+ self._lock.acquire()
+ try:
+ return len(self._map)
+ finally:
+ self._lock.release()
+
+
+class ServiceRequestingTransport(Transport):
+ """
+ Transport, but also handling service requests, like it oughtta!
+
+ .. versionadded:: 3.2
+ """
+
+ # NOTE: this purposefully duplicates some of the parent class in order to
+ # modernize, refactor, etc. The intent is that eventually we will collapse
+ # this one onto the parent in a backwards incompatible release.
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._service_userauth_accepted = False
+ self._handler_table[MSG_SERVICE_ACCEPT] = self._parse_service_accept
+
+ def _parse_service_accept(self, m):
+ service = m.get_text()
+ # Short-circuit for any service name not ssh-userauth.
+ # NOTE: it's technically possible for 'service name' in
+ # SERVICE_REQUEST/ACCEPT messages to be "ssh-connection" --
+ # but I don't see evidence of Paramiko ever initiating or expecting to
+ # receive one of these. We /do/ see the 'service name' field in
+ # MSG_USERAUTH_REQUEST/ACCEPT/FAILURE set to this string, but that is a
+ # different set of handlers, so...!
+ if service != "ssh-userauth":
+ # TODO 4.0: consider erroring here (with an ability to opt out?)
+ # instead as it probably means something went Very Wrong.
+ self._log(
+ DEBUG, 'Service request "{}" accepted (?)'.format(service)
+ )
+ return
+ # Record that we saw a service-userauth acceptance, meaning we are free
+ # to submit auth requests.
+ self._service_userauth_accepted = True
+ self._log(DEBUG, "MSG_SERVICE_ACCEPT received; auth may begin")
+
+ def ensure_session(self):
+ # Make sure we're not trying to auth on a not-yet-open or
+ # already-closed transport session; that's our responsibility, not that
+ # of AuthHandler.
+ if (not self.active) or (not self.initial_kex_done):
+ # TODO: better error message? this can happen in many places, eg
+ # user error (authing before connecting) or developer error (some
+ # improperly handled pre/mid auth shutdown didn't become fatal
+ # enough). The latter is much more common & should ideally be fixed
+ # by terminating things harder?
+ raise SSHException("No existing session")
+ # Also make sure we've actually been told we are allowed to auth.
+ if self._service_userauth_accepted:
+ return
+ # Or request to do so, otherwise.
+ m = Message()
+ m.add_byte(cMSG_SERVICE_REQUEST)
+ m.add_string("ssh-userauth")
+ self._log(DEBUG, "Sending MSG_SERVICE_REQUEST: ssh-userauth")
+ self._send_message(m)
+ # Now we wait to hear back; the user is expecting a blocking-style auth
+ # request so there's no point giving control back anywhere.
+ while not self._service_userauth_accepted:
+ # TODO: feels like we're missing an AuthHandler Event like
+ # 'self.auth_event' which is set when AuthHandler shuts down in
+ # ways good AND bad. Transport only seems to have completion_event
+ # which is unclear re: intent, eg it's set by newkeys which always
+ # happens on connection, so it'll always be set by the time we get
+ # here.
+ # NOTE: this copies the timing of event.wait() in
+ # AuthHandler.wait_for_response, re: 1/10 of a second. Could
+ # presumably be smaller, but seems unlikely this period is going to
+ # be "too long" for any code doing ssh networking...
+ time.sleep(0.1)
+ self.auth_handler = self.get_auth_handler()
+
+ def get_auth_handler(self):
+ # NOTE: using new sibling subclass instead of classic AuthHandler
+ return AuthOnlyHandler(self)
+
+ def auth_none(self, username):
+ # TODO 4.0: merge to parent, preserving (most of) docstring
+ self.ensure_session()
+ return self.auth_handler.auth_none(username)
+
+ def auth_password(self, username, password, fallback=True):
+ # TODO 4.0: merge to parent, preserving (most of) docstring
+ self.ensure_session()
+ try:
+ return self.auth_handler.auth_password(username, password)
+ except BadAuthenticationType as e:
+ # if password auth isn't allowed, but keyboard-interactive *is*,
+ # try to fudge it
+ if not fallback or ("keyboard-interactive" not in e.allowed_types):
+ raise
+ try:
+
+ def handler(title, instructions, fields):
+ if len(fields) > 1:
+ raise SSHException("Fallback authentication failed.")
+ if len(fields) == 0:
+ # for some reason, at least on os x, a 2nd request will
+ # be made with zero fields requested. maybe it's just
+ # to try to fake out automated scripting of the exact
+ # type we're doing here. *shrug* :)
+ return []
+ return [password]
+
+ return self.auth_interactive(username, handler)
+ except SSHException:
+ # attempt to fudge failed; just raise the original exception
+ raise e
+
+ def auth_publickey(self, username, key):
+ # TODO 4.0: merge to parent, preserving (most of) docstring
+ self.ensure_session()
+ return self.auth_handler.auth_publickey(username, key)
+
+ def auth_interactive(self, username, handler, submethods=""):
+ # TODO 4.0: merge to parent, preserving (most of) docstring
+ self.ensure_session()
+ return self.auth_handler.auth_interactive(
+ username, handler, submethods
+ )
+
+ def auth_interactive_dumb(self, username, handler=None, submethods=""):
+ # TODO 4.0: merge to parent, preserving (most of) docstring
+ # NOTE: legacy impl omitted equiv of ensure_session since it just wraps
+ # another call to an auth method. however we reinstate it for
+ # consistency reasons.
+ self.ensure_session()
+ if not handler:
+
+ def handler(title, instructions, prompt_list):
+ answers = []
+ if title:
+ print(title.strip())
+ if instructions:
+ print(instructions.strip())
+ for prompt, show_input in prompt_list:
+ print(prompt.strip(), end=" ")
+ answers.append(input())
+ return answers
+
+ return self.auth_interactive(username, handler, submethods)
+
+ def auth_gssapi_with_mic(self, username, gss_host, gss_deleg_creds):
+ # TODO 4.0: merge to parent, preserving (most of) docstring
+ self.ensure_session()
+ self.auth_handler = self.get_auth_handler()
+ return self.auth_handler.auth_gssapi_with_mic(
+ username, gss_host, gss_deleg_creds
+ )
+
+ def auth_gssapi_keyex(self, username):
+ # TODO 4.0: merge to parent, preserving (most of) docstring
+ self.ensure_session()
+ self.auth_handler = self.get_auth_handler()
+ return self.auth_handler.auth_gssapi_keyex(username)
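A hedged sketch of driving ServiceRequestingTransport directly (SSHClient performs the equivalent internally since 3.2); host, user, and key path are placeholders:

    import socket
    import paramiko

    sock = socket.create_connection(("ssh.example.com", 22))   # placeholder
    t = paramiko.ServiceRequestingTransport(sock)
    t.start_client()

    # ensure_session() runs implicitly inside each auth_* method: it sends
    # MSG_SERVICE_REQUEST("ssh-userauth") and waits for MSG_SERVICE_ACCEPT.
    key = paramiko.Ed25519Key.from_private_key_file("/path/to/id_ed25519")
    t.auth_publickey("user", key)

    chan = t.open_session()
    chan.exec_command("uname -a")
    print(chan.recv(1024).decode())
    t.close()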
diff --git a/paramiko/util.py b/paramiko/util.py
new file mode 100644
index 0000000..f1e33a5
--- /dev/null
+++ b/paramiko/util.py
@@ -0,0 +1,337 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Useful functions used by the rest of paramiko.
+"""
+
+
+import sys
+import struct
+import traceback
+import threading
+import logging
+
+from paramiko.common import (
+ DEBUG,
+ zero_byte,
+ xffffffff,
+ max_byte,
+ byte_ord,
+ byte_chr,
+)
+from paramiko.config import SSHConfig
+
+
+def inflate_long(s, always_positive=False):
+ """turns a normalized byte string into a long-int
+ (adapted from Crypto.Util.number)"""
+ out = 0
+ negative = 0
+ if not always_positive and (len(s) > 0) and (byte_ord(s[0]) >= 0x80):
+ negative = 1
+ if len(s) % 4:
+ filler = zero_byte
+ if negative:
+ filler = max_byte
+ # never convert this to ``s +=`` because this is a string, not a number
+ # noinspection PyAugmentAssignment
+ s = filler * (4 - len(s) % 4) + s
+ for i in range(0, len(s), 4):
+ out = (out << 32) + struct.unpack(">I", s[i : i + 4])[0]
+ if negative:
+ out -= 1 << (8 * len(s))
+ return out
+
+
+def deflate_long(n, add_sign_padding=True):
+ """turns a long-int into a normalized byte string
+ (adapted from Crypto.Util.number)"""
+ # after much testing, this algorithm was deemed to be the fastest
+ s = bytes()
+ n = int(n)
+ while (n != 0) and (n != -1):
+ s = struct.pack(">I", n & xffffffff) + s
+ n >>= 32
+ # strip off leading zeros, FFs
+ for i in enumerate(s):
+ if (n == 0) and (i[1] != 0):
+ break
+ if (n == -1) and (i[1] != 0xFF):
+ break
+ else:
+ # degenerate case, n was either 0 or -1
+ i = (0,)
+ if n == 0:
+ s = zero_byte
+ else:
+ s = max_byte
+ s = s[i[0] :]
+ if add_sign_padding:
+ if (n == 0) and (byte_ord(s[0]) >= 0x80):
+ s = zero_byte + s
+ if (n == -1) and (byte_ord(s[0]) < 0x80):
+ s = max_byte + s
+ return s
+
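inflate_long() and deflate_long() implement the big-endian, two's-complement integer coding used for SSH mpints. A few hand-checked round trips:

    from paramiko.util import deflate_long, inflate_long

    assert deflate_long(0) == b"\x00"
    assert deflate_long(0xFF) == b"\x00\xff"      # sign padding for a set high bit
    assert inflate_long(b"\x00\xff") == 255
    assert inflate_long(b"\xff") == -1            # high bit set => negative
    assert inflate_long(b"\xff", always_positive=True) == 255
    assert inflate_long(deflate_long(2**64 + 1)) == 2**64 + 1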
+
+def format_binary(data, prefix=""):
+ x = 0
+ out = []
+ while len(data) > x + 16:
+ out.append(format_binary_line(data[x : x + 16]))
+ x += 16
+ if x < len(data):
+ out.append(format_binary_line(data[x:]))
+ return [prefix + line for line in out]
+
+
+def format_binary_line(data):
+ left = " ".join(["{:02X}".format(byte_ord(c)) for c in data])
+ right = "".join(
+ [".{:c}..".format(byte_ord(c))[(byte_ord(c) + 63) // 95] for c in data]
+ )
+ return "{:50s} {}".format(left, right)
+
+
+def safe_string(s):
+ out = b""
+ for c in s:
+ i = byte_ord(c)
+ if 32 <= i <= 127:
+ out += byte_chr(i)
+ else:
+ out += b("%{:02X}".format(i))
+ return out
+
+
+def bit_length(n):
+ try:
+ return n.bit_length()
+ except AttributeError:
+ norm = deflate_long(n, False)
+ hbyte = byte_ord(norm[0])
+ if hbyte == 0:
+ return 1
+ bitlen = len(norm) * 8
+ while not (hbyte & 0x80):
+ hbyte <<= 1
+ bitlen -= 1
+ return bitlen
+
+
+def tb_strings():
+ return "".join(traceback.format_exception(*sys.exc_info())).split("\n")
+
+
+def generate_key_bytes(hash_alg, salt, key, nbytes):
+ """
+ Given a password, passphrase, or other human-source key, scramble it
+ through a secure hash into some keyworthy bytes. This specific algorithm
+ is used for encrypting/decrypting private key files.
+
+ :param function hash_alg: A function which creates a new hash object, such
+ as ``hashlib.sha256``.
+ :param salt: data to salt the hash with.
+ :type salt: bytes
+ :param str key: human-entered password or passphrase.
+ :param int nbytes: number of bytes to generate.
+ :return: Key data, as `bytes`.
+ """
+ keydata = bytes()
+ digest = bytes()
+ if len(salt) > 8:
+ salt = salt[:8]
+ while nbytes > 0:
+ hash_obj = hash_alg()
+ if len(digest) > 0:
+ hash_obj.update(digest)
+ hash_obj.update(b(key))
+ hash_obj.update(salt)
+ digest = hash_obj.digest()
+ size = min(nbytes, len(digest))
+ keydata += digest[:size]
+ nbytes -= size
+ return keydata
+
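The loop above is a classic digest-chaining stretch: each round hashes the previous digest plus passphrase plus salt, concatenating output until enough bytes exist. A small usage sketch with placeholder inputs:

    import hashlib
    from paramiko.util import generate_key_bytes

    key = generate_key_bytes(
        hashlib.sha256,
        b"\x01\x02\x03\x04\x05\x06\x07\x08",     # 8-byte salt (placeholder)
        "correct horse battery staple",          # passphrase (placeholder)
        32,
    )
    assert len(key) == 32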
+
+def load_host_keys(filename):
+ """
+ Read a file of known SSH host keys, in the format used by openssh, and
+ return a compound dict of ``hostname -> keytype ->`` `PKey
+ <paramiko.pkey.PKey>`. The hostname may be an IP address or DNS name. The
+ keytype will be either ``"ssh-rsa"`` or ``"ssh-dss"``.
+
+ This type of file unfortunately doesn't exist on Windows, but on posix,
+ it will usually be stored in ``os.path.expanduser("~/.ssh/known_hosts")``.
+
+ Since 1.5.3, this is just a wrapper around `.HostKeys`.
+
+ :param str filename: name of the file to read host keys from
+ :return:
+ nested dict of `.PKey` objects, indexed by hostname and then keytype
+ """
+ from paramiko.hostkeys import HostKeys
+
+ return HostKeys(filename)
+
+
+def parse_ssh_config(file_obj):
+ """
+ Provided only as a backward-compatible wrapper around `.SSHConfig`.
+
+ .. deprecated:: 2.7
+ Use `SSHConfig.from_file` instead.
+ """
+ config = SSHConfig()
+ config.parse(file_obj)
+ return config
+
+
+def lookup_ssh_host_config(hostname, config):
+ """
+ Provided only as a backward-compatible wrapper around `.SSHConfig`.
+ """
+ return config.lookup(hostname)
+
+
+def mod_inverse(x, m):
+ # it's crazy how small Python can make this function.
+ u1, u2, u3 = 1, 0, m
+ v1, v2, v3 = 0, 1, x
+
+ while v3 > 0:
+ q = u3 // v3
+ u1, v1 = v1, u1 - v1 * q
+ u2, v2 = v2, u2 - v2 * q
+ u3, v3 = v3, u3 - v3 * q
+ if u2 < 0:
+ u2 += m
+ return u2
+
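mod_inverse() is the extended Euclidean algorithm, returning the inverse of x modulo m. A quick check:

    from paramiko.util import mod_inverse

    # 3 * 5 == 15 == 1 (mod 7), so the inverse of 3 modulo 7 is 5.
    assert mod_inverse(3, 7) == 5
    assert (3 * mod_inverse(3, 7)) % 7 == 1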
+
+_g_thread_data = threading.local()
+_g_thread_counter = 0
+_g_thread_lock = threading.Lock()
+
+
+def get_thread_id():
+ global _g_thread_data, _g_thread_counter, _g_thread_lock
+ try:
+ return _g_thread_data.id
+ except AttributeError:
+ with _g_thread_lock:
+ _g_thread_counter += 1
+ _g_thread_data.id = _g_thread_counter
+ return _g_thread_data.id
+
+
+def log_to_file(filename, level=DEBUG):
+ """send paramiko logs to a logfile,
+ if they're not already going somewhere"""
+ logger = logging.getLogger("paramiko")
+ if len(logger.handlers) > 0:
+ return
+ logger.setLevel(level)
+ f = open(filename, "a")
+ handler = logging.StreamHandler(f)
+ frm = "%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(_threadid)-3d"
+ frm += " %(name)s: %(message)s"
+ handler.setFormatter(logging.Formatter(frm, "%Y%m%d-%H:%M:%S"))
+ logger.addHandler(handler)
+
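Typical use is a single call before any connection is opened; the early return above makes repeated calls harmless. A hedged sketch with placeholder connection details:

    import paramiko
    from paramiko.util import log_to_file

    log_to_file("paramiko.log")      # no-op if a handler is already attached

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect("ssh.example.com", username="user", password="secret")
    client.close()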
+
+# make only one filter object, so it doesn't get applied more than once
+class PFilter:
+ def filter(self, record):
+ record._threadid = get_thread_id()
+ return True
+
+
+_pfilter = PFilter()
+
+
+def get_logger(name):
+ logger = logging.getLogger(name)
+ logger.addFilter(_pfilter)
+ return logger
+
+
+def constant_time_bytes_eq(a, b):
+ if len(a) != len(b):
+ return False
+ res = 0
+ # noinspection PyUnresolvedReferences
+ for i in range(len(a)): # noqa: F821
+ res |= byte_ord(a[i]) ^ byte_ord(b[i])
+ return res == 0
+
+
+class ClosingContextManager:
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.close()
+
+
+def clamp_value(minimum, val, maximum):
+ return max(minimum, min(val, maximum))
+
+
+def asbytes(s):
+ """
+ Coerce to bytes if possible or return unchanged.
+ """
+ try:
+ # Attempt to run through our version of b(), which does the Right Thing
+ # for unicode strings vs bytestrings, and raises TypeError if it's not
+ # one of those types.
+ return b(s)
+ except TypeError:
+ try:
+ # If it wasn't a string/byte/buffer-ish object, try calling an
+ # asbytes() method, which many of our internal classes implement.
+ return s.asbytes()
+ except AttributeError:
+ # Finally, just do nothing & assume this object is sufficiently
+ # byte-y or buffer-y that everything will work out (or that callers
+ # are capable of handling whatever it is.)
+ return s
+
+
+# TODO: clean this up / force callers to assume bytes OR unicode
+def b(s, encoding="utf8"):
+ """cast unicode or bytes to bytes"""
+ if isinstance(s, bytes):
+ return s
+ elif isinstance(s, str):
+ return s.encode(encoding)
+ else:
+ raise TypeError(f"Expected unicode or bytes, got {type(s)}")
+
+
+# TODO: clean this up / force callers to assume bytes OR unicode
+def u(s, encoding="utf8"):
+ """cast bytes or unicode to unicode"""
+ if isinstance(s, bytes):
+ return s.decode(encoding)
+ elif isinstance(s, str):
+ return s
+ else:
+ raise TypeError(f"Expected unicode or bytes, got {type(s)}")
diff --git a/paramiko/win_openssh.py b/paramiko/win_openssh.py
new file mode 100644
index 0000000..614b589
--- /dev/null
+++ b/paramiko/win_openssh.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2021 Lew Gordon <lew.gordon@genesys.com>
+# Copyright (C) 2022 Patrick Spendrin <ps_ml@gmx.de>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os.path
+import time
+
+PIPE_NAME = r"\\.\pipe\openssh-ssh-agent"
+
+
+def can_talk_to_agent():
+ # use os.listdir() instead of os.path.exists(), because os.path.exists()
+ # uses CreateFileW() API and the pipe cannot be reopen unless the server
+ # calls DisconnectNamedPipe().
+ dir_, name = os.path.split(PIPE_NAME)
+ name = name.lower()
+ return any(name == n.lower() for n in os.listdir(dir_))
+
+
+class OpenSSHAgentConnection:
+ def __init__(self):
+ while True:
+ try:
+ self._pipe = os.open(PIPE_NAME, os.O_RDWR | os.O_BINARY)
+ except OSError as e:
+ # retry when errno 22 which means that the server has not
+ # called DisconnectNamedPipe() yet.
+ if e.errno != 22:
+ raise
+ else:
+ break
+ time.sleep(0.1)
+
+ def send(self, data):
+ return os.write(self._pipe, data)
+
+ def recv(self, n):
+ return os.read(self._pipe, n)
+
+ def close(self):
+ return os.close(self._pipe)
diff --git a/paramiko/win_pageant.py b/paramiko/win_pageant.py
new file mode 100644
index 0000000..c927de6
--- /dev/null
+++ b/paramiko/win_pageant.py
@@ -0,0 +1,138 @@
+# Copyright (C) 2005 John Arbash-Meinel <john@arbash-meinel.com>
+# Modified up by: Todd Whiteman <ToddW@ActiveState.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Functions for communicating with Pageant, the basic windows ssh agent program.
+"""
+
+import array
+import ctypes.wintypes
+import platform
+import struct
+from paramiko.common import zero_byte
+from paramiko.util import b
+
+import _thread as thread
+
+from . import _winapi
+
+
+_AGENT_COPYDATA_ID = 0x804E50BA
+_AGENT_MAX_MSGLEN = 8192
+# Note: The WM_COPYDATA value is pulled from win32con, as a workaround
+# so we do not have to import this huge library just for this one variable.
+win32con_WM_COPYDATA = 74
+
+
+def _get_pageant_window_object():
+ return ctypes.windll.user32.FindWindowA(b"Pageant", b"Pageant")
+
+
+def can_talk_to_agent():
+ """
+ Check to see if there is a "Pageant" agent we can talk to.
+
+ This checks both if we have the required libraries (win32all or ctypes)
+ and if there is a Pageant currently running.
+ """
+ return bool(_get_pageant_window_object())
+
+
+if platform.architecture()[0] == "64bit":
+ ULONG_PTR = ctypes.c_uint64
+else:
+ ULONG_PTR = ctypes.c_uint32
+
+
+class COPYDATASTRUCT(ctypes.Structure):
+ """
+ ctypes implementation of
+ http://msdn.microsoft.com/en-us/library/windows/desktop/ms649010%28v=vs.85%29.aspx
+ """
+
+ _fields_ = [
+ ("num_data", ULONG_PTR),
+ ("data_size", ctypes.wintypes.DWORD),
+ ("data_loc", ctypes.c_void_p),
+ ]
+
+
+def _query_pageant(msg):
+ """
+ Communication with the Pageant process is done through a shared
+ memory-mapped file.
+ """
+ hwnd = _get_pageant_window_object()
+ if not hwnd:
+ # Pageant isn't running (anymore); return None to signal the failure
+ # to connect.
+ return None
+
+ # create a name for the mmap
+ map_name = f"PageantRequest{thread.get_ident():08x}"
+
+ pymap = _winapi.MemoryMap(
+ map_name, _AGENT_MAX_MSGLEN, _winapi.get_security_attributes_for_user()
+ )
+ with pymap:
+ pymap.write(msg)
+ # Create an array buffer containing the mapped filename
+ char_buffer = array.array("b", b(map_name) + zero_byte) # noqa
+ char_buffer_address, char_buffer_size = char_buffer.buffer_info()
+ # Create a string to use for the SendMessage function call
+ cds = COPYDATASTRUCT(
+ _AGENT_COPYDATA_ID, char_buffer_size, char_buffer_address
+ )
+
+ response = ctypes.windll.user32.SendMessageA(
+ hwnd, win32con_WM_COPYDATA, ctypes.sizeof(cds), ctypes.byref(cds)
+ )
+
+ if response > 0:
+ pymap.seek(0)
+ datalen = pymap.read(4)
+ retlen = struct.unpack(">I", datalen)[0]
+ return datalen + pymap.read(retlen)
+ return None
+
+
+class PageantConnection:
+ """
+ Mock "connection" to an agent which roughly approximates the behavior of
+ a unix local-domain socket (as used by Agent). Requests are sent to the
+ pageant daemon via special Windows magick, and responses are buffered back
+ for subsequent reads.
+ """
+
+ def __init__(self):
+ self._response = None
+
+ def send(self, data):
+ self._response = _query_pageant(data)
+
+ def recv(self, n):
+ if self._response is None:
+ return ""
+ ret = self._response[:n]
+ self._response = self._response[n:]
+ if self._response == "":
+ self._response = None
+ return ret
+
+ def close(self):
+ pass
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..f51e190
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+testpaths = tests
+python_files = *
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..c5ee105
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,95 @@
+# Copyright (C) 2003-2008 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA.
+
+import sys
+from setuptools import setup
+
+if sys.platform == "darwin":
+ import setup_helper
+
+ setup_helper.install_custom_make_tarball()
+
+long_description = open("README.rst").read()
+
+# Version info -- read without importing
+_locals = {}
+with open("paramiko/_version.py") as fp:
+ exec(fp.read(), None, _locals)
+version = _locals["__version__"]
+
+# Have to build extras_require dynamically because it doesn't allow
+# self-referencing and I hate repeating myself.
+extras_require = {
+ "gssapi": [
+ "pyasn1>=0.1.7",
+ 'gssapi>=1.4.1;platform_system!="Windows"',
+ 'pywin32>=2.1.8;platform_system=="Windows"',
+ ],
+ "invoke": ["invoke>=2.0"],
+ # TODO 4.0: remove entrypoint as irrelevant
+ "ed25519": [],
+}
+everything = []
+for subdeps in extras_require.values():
+ everything.extend(subdeps)
+extras_require["all"] = everything
+
+setup(
+ name="paramiko",
+ version=version,
+ description="SSH2 protocol library",
+ long_description=long_description,
+ author="Jeff Forcier",
+ author_email="jeff@bitprophet.org",
+ url="https://paramiko.org",
+ project_urls={
+ "Docs": "https://docs.paramiko.org",
+ "Source": "https://github.com/paramiko/paramiko",
+ "Issues": "https://github.com/paramiko/paramiko/issues",
+ "Changelog": "https://www.paramiko.org/changelog.html",
+ "CI": "https://app.circleci.com/pipelines/github/paramiko/paramiko",
+ },
+ packages=["paramiko"],
+ license="LGPL",
+ platforms="Posix; MacOS X; Windows",
+ classifiers=[
+ "Development Status :: 5 - Production/Stable",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: "
+ "GNU Library or Lesser General Public License (LGPL)",
+ "Operating System :: OS Independent",
+ "Topic :: Internet",
+ "Topic :: Security :: Cryptography",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3 :: Only",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ ],
+ python_requires=">=3.6",
+ install_requires=[
+ "bcrypt>=3.2",
+ "cryptography>=3.3",
+ "pynacl>=1.5",
+ ],
+ extras_require=extras_require,
+)
diff --git a/setup_helper.py b/setup_helper.py
new file mode 100644
index 0000000..f290ea3
--- /dev/null
+++ b/setup_helper.py
@@ -0,0 +1,160 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# Note: Despite the copyright notice, this was submitted by John
+# Arbash Meinel. Thanks John!
+
+
+"""A small set of helper functions for dealing with setup issues"""
+
+import os
+import tarfile
+
+from distutils import log
+import distutils.archive_util
+from distutils.dir_util import mkpath
+from distutils.spawn import spawn
+
+try:
+ from pwd import getpwnam
+except ImportError:
+ getpwnam = None
+
+try:
+ from grp import getgrnam
+except ImportError:
+ getgrnam = None
+
+
+def _get_gid(name):
+ """Returns a gid, given a group name."""
+ if getgrnam is None or name is None:
+ return None
+ try:
+ result = getgrnam(name)
+ except KeyError:
+ result = None
+ if result is not None:
+ return result[2]
+ return None
+
+
+def _get_uid(name):
+ """Returns an uid, given a user name."""
+ if getpwnam is None or name is None:
+ return None
+ try:
+ result = getpwnam(name)
+ except KeyError:
+ result = None
+ if result is not None:
+ return result[2]
+ return None
+
+
+def make_tarball(
+ base_name,
+ base_dir,
+ compress="gzip",
+ verbose=0,
+ dry_run=0,
+ owner=None,
+ group=None,
+):
+ """Create a tar file from all the files under 'base_dir'.
+ This file may be compressed.
+
+ :param compress: Compression algorithms. Supported algorithms are:
+ 'gzip': (the default)
+ 'compress'
+ 'bzip2'
+ None
+ For 'gzip' and 'bzip2' the internal tarfile module will be used.
+ For 'compress' the .tar will be created using tarfile, and then
+ we will spawn 'compress' afterwards.
+ The output tar file will be named 'base_name' + ".tar",
+ possibly plus the appropriate compression extension (".gz",
+ ".bz2" or ".Z"). Return the output filename.
+ """
+ # XXX GNU tar 1.13 has a nifty option to add a prefix directory.
+ # It's pretty new, though, so we certainly can't require it --
+ # but it would be nice to take advantage of it to skip the
+ # "create a tree of hardlinks" step! (Would also be nice to
+ # detect GNU tar to use its 'z' option and save a step.)
+
+ compress_ext = {"gzip": ".gz", "bzip2": ".bz2", "compress": ".Z"}
+
+ # flags for compression program, each element of list will be an argument
+ tarfile_compress_flag = {"gzip": "gz", "bzip2": "bz2"}
+ compress_flags = {"compress": ["-f"]}
+
+ if compress is not None and compress not in compress_ext.keys():
+ raise ValueError(
+ "bad value for 'compress': must be None, 'gzip', "
+ "'bzip2' or 'compress'"
+ )
+
+ archive_name = base_name + ".tar"
+ if compress and compress in tarfile_compress_flag:
+ archive_name += compress_ext[compress]
+
+ mode = "w:" + tarfile_compress_flag.get(compress, "")
+
+ mkpath(os.path.dirname(archive_name), dry_run=dry_run)
+ log.info(f"Creating tar file {archive_name} with mode {mode}")
+
+ uid = _get_uid(owner)
+ gid = _get_gid(group)
+
+ def _set_uid_gid(tarinfo):
+ if gid is not None:
+ tarinfo.gid = gid
+ tarinfo.gname = group
+ if uid is not None:
+ tarinfo.uid = uid
+ tarinfo.uname = owner
+ return tarinfo
+
+ if not dry_run:
+ tar = tarfile.open(archive_name, mode=mode)
+ # This recursively adds everything underneath base_dir
+ try:
+ tar.add(base_dir, filter=_set_uid_gid)
+ finally:
+ tar.close()
+
+ if compress and compress not in tarfile_compress_flag:
+ spawn(
+ [compress] + compress_flags[compress] + [archive_name],
+ dry_run=dry_run,
+ )
+ return archive_name + compress_ext[compress]
+ else:
+ return archive_name
+
+
+_custom_formats = {
+ "gztar": (make_tarball, [("compress", "gzip")], "gzip'ed tar-file"),
+ "bztar": (make_tarball, [("compress", "bzip2")], "bzip2'ed tar-file"),
+ "ztar": (make_tarball, [("compress", "compress")], "compressed tar file"),
+ "tar": (make_tarball, [("compress", None)], "uncompressed tar file"),
+}
+
+# Hack in and insert ourselves into the distutils code base
+def install_custom_make_tarball():
+ distutils.archive_util.ARCHIVE_FORMATS.update(_custom_formats)
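A hedged sketch of using make_tarball() on its own (paths are placeholders; run from the repository root so setup_helper is importable):

    import os
    from setup_helper import make_tarball

    os.makedirs("example", exist_ok=True)
    archive = make_tarball("dist/example", "example", compress="gzip")
    print(archive)       # dist/example.tar.gz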
diff --git a/sites/docs/.readthedocs.yaml b/sites/docs/.readthedocs.yaml
new file mode 100644
index 0000000..3212c93
--- /dev/null
+++ b/sites/docs/.readthedocs.yaml
@@ -0,0 +1,13 @@
+version: 2
+
+build:
+ os: "ubuntu-22.04"
+ tools:
+ python: "3.7"
+
+python:
+ install:
+ - requirements: dev-requirements.txt
+
+sphinx:
+ configuration: sites/docs/conf.py
diff --git a/sites/docs/api/agent.rst b/sites/docs/api/agent.rst
new file mode 100644
index 0000000..f01ad97
--- /dev/null
+++ b/sites/docs/api/agent.rst
@@ -0,0 +1,6 @@
+SSH agents
+==========
+
+.. automodule:: paramiko.agent
+ :inherited-members:
+ :no-special-members:
diff --git a/sites/docs/api/auth.rst b/sites/docs/api/auth.rst
new file mode 100644
index 0000000..b6bce36
--- /dev/null
+++ b/sites/docs/api/auth.rst
@@ -0,0 +1,8 @@
+Authentication modules
+======================
+
+.. automodule:: paramiko.auth_strategy
+ :member-order: bysource
+
+.. automodule:: paramiko.auth_handler
+ :member-order: bysource
diff --git a/sites/docs/api/buffered_pipe.rst b/sites/docs/api/buffered_pipe.rst
new file mode 100644
index 0000000..54d87c5
--- /dev/null
+++ b/sites/docs/api/buffered_pipe.rst
@@ -0,0 +1,4 @@
+Buffered pipes
+==============
+
+.. automodule:: paramiko.buffered_pipe
diff --git a/sites/docs/api/channel.rst b/sites/docs/api/channel.rst
new file mode 100644
index 0000000..e71526f
--- /dev/null
+++ b/sites/docs/api/channel.rst
@@ -0,0 +1,4 @@
+Channel
+=======
+
+.. automodule:: paramiko.channel
diff --git a/sites/docs/api/client.rst b/sites/docs/api/client.rst
new file mode 100644
index 0000000..b201812
--- /dev/null
+++ b/sites/docs/api/client.rst
@@ -0,0 +1,5 @@
+Client
+======
+
+.. automodule:: paramiko.client
+ :member-order: bysource
diff --git a/sites/docs/api/config.rst b/sites/docs/api/config.rst
new file mode 100644
index 0000000..9015a77
--- /dev/null
+++ b/sites/docs/api/config.rst
@@ -0,0 +1,135 @@
+=============
+Configuration
+=============
+
+Paramiko **does not itself** leverage `OpenSSH-style config file directives
+<ssh_config>`_, but it **does** implement a parser for the format, which users
+can honor themselves (and is used by higher-level libraries, such as
+`Fabric`_).
+
+The API for this is `.SSHConfig`, which can load SSH config files from a path,
+file-like object, or string, and exposes "look up a hostname, get back a dict
+of applicable keywords/values" functionality.
+
+As with OpenSSH's own support, this dict will contain values from across the
+parsed file, depending on the order in which keywords were encountered and how
+specific or generic the ``Host`` or ``Match`` directives were.
+
+.. note::
+ Result keys are lowercased for consistency and ease of deduping, as the
+ overall parsing/matching is itself case-insensitive. Thus, a source file
+ containing e.g. ``ProxyCommand`` will result in lookup results like
+ ``{"proxycommand": "shell command here"}``.
+
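+A minimal usage sketch (the path and hostname below are illustrative only)::
+
+    from paramiko.config import SSHConfig
+
+    # Parse an OpenSSH-style config file and look up one host; note the
+    # lowercased keys in the result, per the note above.
+    config = SSHConfig.from_path("/home/alice/.ssh/config")
+    settings = config.lookup("example.com")
+    print(settings.get("hostname"), settings.get("user"))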
+
+.. _ssh-config-support:
+
+Keywords currently supported
+============================
+
+The following is an alphabetical list of which `ssh_config`_ directives
+Paramiko interprets during the parse/lookup process (as above, actual SSH
+connections **do not** reference parsed configs). Departures from `OpenSSH's
+implementation <ssh_config>`_ (e.g. to support backwards compat with older
+Paramiko releases) are included. A keyword by itself means no known departures.
+
+- ``AddressFamily``: used when looking up the local hostname for purposes of
+ expanding the ``%l``/``%L`` :ref:`tokens <TOKENS>` (this is actually a minor
+  value-add on top of OpenSSH, which doesn't honor this setting when
+ expanding ``%l``).
+- ``CanonicalDomains``
+
+ .. versionadded:: 2.7
+
+- ``CanonicalizeFallbackLocal``: when ``no``, triggers raising of
+ `.CouldNotCanonicalize` for target hostnames which do not successfully
+ canonicalize.
+
+ .. versionadded:: 2.7
+
+- ``CanonicalizeHostname``: along with the other ``Canonicaliz*`` settings
+ (sans ``CanonicalizePermittedCNAMEs``, which is not yet implemented), enables
+ hostname canonicalization, insofar as calling `.SSHConfig.lookup` with a
+ given hostname will return a canonicalized copy of the config data, including
+ an updated ``HostName`` value.
+
+ .. versionadded:: 2.7
+
+- ``CanonicalizeMaxDots``
+
+ .. versionadded:: 2.7
+
+- ``Host``
+- ``HostName``: used in ``%h`` :ref:`token expansion <TOKENS>`
+- ``Match``: supports the keywords ``all``, ``canonical``, ``exec``, ``final``,
+ ``host``, ``localuser``, ``originalhost``, and ``user``, with the following
+ caveats:
+
+ - You must have the optional dependency Invoke installed; see :ref:`the
+ installation docs <paramiko-itself>` (in brief: install
+ ``paramiko[invoke]`` or ``paramiko[all]``).
+ - As usual, connection-time information is not present during config
+ lookup, and thus cannot be used to determine matching. This primarily
+ impacts ``Match user``, which can match against loaded ``User`` values
+ but has no knowledge about connection-time usernames.
+
+ .. versionadded:: 2.7
+ .. versionchanged:: 3.3
+ Added support for the ``final`` keyword.
+
+- ``Port``: supplies potential values for ``%p`` :ref:`token expansion
+ <TOKENS>`.
+- ``ProxyCommand``: see our `.ProxyCommand` class for an easy
+ way to honor this keyword from a config you've parsed.
+
+ - Honors :ref:`token expansion <TOKENS>`.
+ - When a lookup would result in an effective ``ProxyCommand none``,
+    Paramiko 1.x-2.x stripped it from the resulting dict entirely; as of 3.0,
+    the key is preserved with a Python value of ``None`` instead.
+
+- ``User``: supplies potential values for ``%u`` :ref:`token expansion
+ <TOKENS>`.
+
+.. _TOKENS:
+
+Expansion tokens
+----------------
+
+We support most SSH config expansion tokens where possible, so when they are
+present in a config file source, the result of a `.SSHConfig.lookup` will
+contain the expansions/substitutions (based on the rest of the config or
+properties of the local system).
+
+Specifically, we are known to support the below, where applicable (e.g. as in
+OpenSSH, ``%L`` works in ``ControlPath`` but not elsewhere):
+
+- ``%C``
+- ``%d``
+- ``%h``
+- ``%l``
+- ``%L``
+- ``%n``
+- ``%p``
+- ``%r``
+- ``%u``: substitutes the configured ``User`` value, or the local user (as seen
+ by ``getpass.getuser``) if not specified.
+
+In addition, we extend OpenSSH's tokens as follows:
+
+- ``~`` is treated like ``%d`` (expands to the local user's home directory
+ path) when expanding ``ProxyCommand`` values, since ``ProxyCommand`` does not
+ natively support ``%d`` for some reason.
+
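+A short sketch of token expansion in practice (the hosts below are made up)::
+
+    from paramiko.config import SSHConfig
+
+    source = """
+    Host myalias
+        HostName real.example.com
+        ProxyCommand ssh -W %h:%p jumphost
+    """
+    config = SSHConfig.from_text(source)
+    # %h and %p inside ProxyCommand are replaced with the looked-up HostName
+    # and effective port during lookup().
+    print(config.lookup("myalias")["proxycommand"])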
+
+.. _ssh_config: https://man.openbsd.org/ssh_config
+.. _Fabric: http://fabfile.org
+
+
+``config`` module API documentation
+===================================
+
+Mostly of interest to contributors; see previous section for behavioral
+details.
+
+.. automodule:: paramiko.config
+ :member-order: bysource
diff --git a/sites/docs/api/file.rst b/sites/docs/api/file.rst
new file mode 100644
index 0000000..199f3ec
--- /dev/null
+++ b/sites/docs/api/file.rst
@@ -0,0 +1,4 @@
+Buffered files
+==============
+
+.. automodule:: paramiko.file
diff --git a/sites/docs/api/hostkeys.rst b/sites/docs/api/hostkeys.rst
new file mode 100644
index 0000000..770652f
--- /dev/null
+++ b/sites/docs/api/hostkeys.rst
@@ -0,0 +1,5 @@
+Host keys / ``known_hosts`` files
+=================================
+
+.. automodule:: paramiko.hostkeys
+ :member-order: bysource
diff --git a/sites/docs/api/kex_gss.rst b/sites/docs/api/kex_gss.rst
new file mode 100644
index 0000000..9fd0922
--- /dev/null
+++ b/sites/docs/api/kex_gss.rst
@@ -0,0 +1,5 @@
+GSS-API key exchange
+====================
+
+.. automodule:: paramiko.kex_gss
+ :member-order: bysource
diff --git a/sites/docs/api/keys.rst b/sites/docs/api/keys.rst
new file mode 100644
index 0000000..a456f50
--- /dev/null
+++ b/sites/docs/api/keys.rst
@@ -0,0 +1,28 @@
+============
+Key handling
+============
+
+Parent key class
+================
+
+.. automodule:: paramiko.pkey
+
+DSA (DSS)
+=========
+
+.. automodule:: paramiko.dsskey
+
+RSA
+===
+
+.. automodule:: paramiko.rsakey
+
+ECDSA
+=====
+
+.. automodule:: paramiko.ecdsakey
+
+Ed25519
+=======
+
+.. automodule:: paramiko.ed25519key
diff --git a/sites/docs/api/message.rst b/sites/docs/api/message.rst
new file mode 100644
index 0000000..8d531e0
--- /dev/null
+++ b/sites/docs/api/message.rst
@@ -0,0 +1,4 @@
+Message
+=======
+
+.. automodule:: paramiko.message
diff --git a/sites/docs/api/packet.rst b/sites/docs/api/packet.rst
new file mode 100644
index 0000000..4089203
--- /dev/null
+++ b/sites/docs/api/packet.rst
@@ -0,0 +1,4 @@
+Packetizer
+==========
+
+.. automodule:: paramiko.packet
diff --git a/sites/docs/api/pipe.rst b/sites/docs/api/pipe.rst
new file mode 100644
index 0000000..e480446
--- /dev/null
+++ b/sites/docs/api/pipe.rst
@@ -0,0 +1,4 @@
+Cross-platform pipe implementations
+===================================
+
+.. automodule:: paramiko.pipe
diff --git a/sites/docs/api/proxy.rst b/sites/docs/api/proxy.rst
new file mode 100644
index 0000000..489b14e
--- /dev/null
+++ b/sites/docs/api/proxy.rst
@@ -0,0 +1,4 @@
+``ProxyCommand`` support
+========================
+
+.. automodule:: paramiko.proxy
diff --git a/sites/docs/api/server.rst b/sites/docs/api/server.rst
new file mode 100644
index 0000000..ea85454
--- /dev/null
+++ b/sites/docs/api/server.rst
@@ -0,0 +1,5 @@
+Server implementation
+=====================
+
+.. automodule:: paramiko.server
+ :member-order: bysource
diff --git a/sites/docs/api/sftp.rst b/sites/docs/api/sftp.rst
new file mode 100644
index 0000000..956eada
--- /dev/null
+++ b/sites/docs/api/sftp.rst
@@ -0,0 +1,13 @@
+SFTP
+====
+
+.. automodule:: paramiko.sftp
+.. automodule:: paramiko.sftp_client
+.. automodule:: paramiko.sftp_server
+.. automodule:: paramiko.sftp_attr
+.. automodule:: paramiko.sftp_file
+ :inherited-members:
+ :no-special-members:
+ :show-inheritance:
+.. automodule:: paramiko.sftp_handle
+.. automodule:: paramiko.sftp_si
diff --git a/sites/docs/api/ssh_exception.rst b/sites/docs/api/ssh_exception.rst
new file mode 100644
index 0000000..64872dc
--- /dev/null
+++ b/sites/docs/api/ssh_exception.rst
@@ -0,0 +1,4 @@
+Exceptions
+==========
+
+.. automodule:: paramiko.ssh_exception
diff --git a/sites/docs/api/ssh_gss.rst b/sites/docs/api/ssh_gss.rst
new file mode 100644
index 0000000..155fcff
--- /dev/null
+++ b/sites/docs/api/ssh_gss.rst
@@ -0,0 +1,17 @@
+GSS-API authentication
+======================
+
+.. automodule:: paramiko.ssh_gss
+ :member-order: bysource
+
+.. autoclass:: _SSH_GSSAuth
+ :member-order: bysource
+
+.. autoclass:: _SSH_GSSAPI_OLD
+ :member-order: bysource
+
+.. autoclass:: _SSH_GSSAPI_NEW
+ :member-order: bysource
+
+.. autoclass:: _SSH_SSPI
+ :member-order: bysource
diff --git a/sites/docs/api/transport.rst b/sites/docs/api/transport.rst
new file mode 100644
index 0000000..7c9d9fd
--- /dev/null
+++ b/sites/docs/api/transport.rst
@@ -0,0 +1,5 @@
+Transport
+=========
+
+.. automodule:: paramiko.transport
+ :member-order: bysource
diff --git a/sites/docs/conf.py b/sites/docs/conf.py
new file mode 100644
index 0000000..79958e6
--- /dev/null
+++ b/sites/docs/conf.py
@@ -0,0 +1,28 @@
+# Obtain shared config values
+import os, sys
+from os.path import abspath, join, dirname
+
+sys.path.append(abspath(".."))
+sys.path.append(abspath("../.."))
+from shared_conf import *
+
+# Enable autodoc, intersphinx
+extensions.extend(["sphinx.ext.autodoc"])
+
+# Autodoc settings
+autodoc_default_options = {
+ "members": True,
+ "special-members": True,
+}
+
+# Default is 'local' building, but reference the public www site when building
+# under RTD.
+target = join(dirname(__file__), "..", "www", "_build")
+if os.environ.get("READTHEDOCS") == "True":
+ target = "http://paramiko.org"
+intersphinx_mapping["www"] = (target, None)
+
+# Sister-site links to WWW
+html_theme_options["extra_nav_links"] = {
+ "Main website": "http://www.paramiko.org"
+}
diff --git a/sites/docs/index.rst b/sites/docs/index.rst
new file mode 100644
index 0000000..675fe59
--- /dev/null
+++ b/sites/docs/index.rst
@@ -0,0 +1,75 @@
+====================================
+Welcome to Paramiko's documentation!
+====================================
+
+This site covers Paramiko's usage & API documentation. For basic info on what
+Paramiko is, including its public changelog & how the project is maintained,
+please see `the main project website <http://paramiko.org>`_.
+
+
+API documentation
+=================
+
+The high-level client API starts with creation of an `.SSHClient` object. For
+more direct control, pass a socket (or socket-like object) to a `.Transport`,
+and use `start_server <.Transport.start_server>` or `start_client
+<.Transport.start_client>` to negotiate with the remote host as either a server
+or client.
+
+As a client, you are responsible for authenticating using a password or private
+key, and checking the server's host key. (Key signing and verification are
+done by Paramiko, but you will need to provide private keys and check that the
+content of a public key matches what you expected to see.)
+
+As a server, you are responsible for deciding which users, passwords, and keys
+to allow, and what kind of channels to allow.
+
+Once you have finished, either side may request flow-controlled `channels
+<.Channel>` to the other side, which are Python objects that act like sockets,
+but send and receive data over the encrypted session.
+
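+As an illustrative sketch only (the host, credentials, and command below are
+placeholders)::
+
+    import paramiko
+
+    client = paramiko.SSHClient()
+    # Demo-only policy; real code should verify host keys, e.g. via
+    # load_system_host_keys() plus the default RejectPolicy.
+    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    client.connect("ssh.example.com", username="alice", password="secret")
+    stdin, stdout, stderr = client.exec_command("uname -a")
+    print(stdout.read().decode())
+    client.close()
+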
+For details, please see the following tables of contents (which are organized
+by area of interest).
+
+
+Core SSH protocol classes
+-------------------------
+
+.. toctree::
+ api/channel
+ api/client
+ api/message
+ api/packet
+ api/transport
+
+
+Authentication & keys
+---------------------
+
+.. toctree::
+ api/auth
+ api/agent
+ api/hostkeys
+ api/keys
+ api/ssh_gss
+ api/kex_gss
+
+
+Other primary functions
+-----------------------
+
+.. toctree::
+ api/config
+ api/proxy
+ api/server
+ api/sftp
+
+
+Miscellany
+----------
+
+.. toctree::
+ api/buffered_pipe
+ api/file
+ api/pipe
+ api/ssh_exception
diff --git a/sites/shared_conf.py b/sites/shared_conf.py
new file mode 100644
index 0000000..5e85def
--- /dev/null
+++ b/sites/shared_conf.py
@@ -0,0 +1,33 @@
+from datetime import datetime
+
+import alabaster
+
+
+# Alabaster theme + mini-extension
+html_theme_path = [alabaster.get_path()]
+extensions = ["alabaster", "sphinx.ext.intersphinx"]
+# Paths relative to invoking conf.py - not this shared file
+html_theme = "alabaster"
+html_theme_options = {
+ "description": "A Python implementation of SSHv2.",
+ "github_user": "paramiko",
+ "github_repo": "paramiko",
+ "analytics_id": "UA-18486793-2",
+ "travis_button": False,
+ "tidelift_url": "https://tidelift.com/subscription/pkg/pypi-paramiko?utm_source=pypi-paramiko&utm_medium=referral&utm_campaign=docs",
+}
+html_sidebars = {
+ "**": ["about.html", "navigation.html", "searchbox.html", "donate.html"]
+}
+
+# Everything intersphinx's to Python
+intersphinx_mapping = {"python": ("https://docs.python.org/3.6/", None)}
+
+# Regular settings
+project = "Paramiko"
+copyright = f"{datetime.now().year} Jeff Forcier"
+master_doc = "index"
+templates_path = ["_templates"]
+exclude_trees = ["_build"]
+source_suffix = ".rst"
+default_role = "obj"
diff --git a/sites/www/.readthedocs.yaml b/sites/www/.readthedocs.yaml
new file mode 100644
index 0000000..4f7d397
--- /dev/null
+++ b/sites/www/.readthedocs.yaml
@@ -0,0 +1,13 @@
+version: 2
+
+build:
+ os: "ubuntu-22.04"
+ tools:
+ python: "3.7"
+
+python:
+ install:
+ - requirements: dev-requirements.txt
+
+sphinx:
+ configuration: sites/www/conf.py
diff --git a/sites/www/_templates/rss.xml b/sites/www/_templates/rss.xml
new file mode 100644
index 0000000..f6f9cbd
--- /dev/null
+++ b/sites/www/_templates/rss.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
+ <channel>
+ <atom:link href="{{ atom }}" rel="self" type="application/rss+xml" />
+ <title>{{ title }}</title>
+ <link>{{ link }}</link>
+ <description>{{ description }}</description>
+ <pubDate>{{ date }}</pubDate>
+ {% for link, title, desc, date in posts %}
+ <item>
+ <link>{{ link }}</link>
+ <guid>{{ link }}</guid>
+ <title><![CDATA[{{ title }}]]></title>
+ <description><![CDATA[{{ desc }}]]></description>
+ <pubDate>{{ date }}</pubDate>
+ </item>
+ {% endfor %}
+ </channel>
+</rss>
diff --git a/sites/www/changelog.rst b/sites/www/changelog.rst
new file mode 100644
index 0000000..b7d93b3
--- /dev/null
+++ b/sites/www/changelog.rst
@@ -0,0 +1,1668 @@
+=========
+Changelog
+=========
+
+- :release:`3.4.0 <2023-12-18>`
+- :feature:`-` `Transport` grew a new ``packetizer_class`` kwarg for overriding
+ the packet-handler class used internally. Mostly for testing, but advanced
+ users may find this useful when doing deep hacks.
+- :bug:`-` Address `CVE-2023-48795 <https://terrapin-attack.com/>`_ (aka the
+ "Terrapin Attack", a vulnerability found in the SSH protocol re: treatment of
+ packet sequence numbers) as follows:
+
+ - The vulnerability only impacts encrypt-then-MAC digest algorithms in
+ tandem with CBC ciphers, and ChaCha20-poly1305; of these, Paramiko
+ currently only implements ``hmac-sha2-(256|512)-etm`` in tandem with
+ ``AES-CBC``. If you are unable to upgrade to Paramiko versions containing
+ the below fixes right away, you may instead use the
+ ``disabled_algorithms`` connection option to disable the ETM MACs and/or
+ the CBC ciphers (this option is present in Paramiko >=2.6).
+ - As the fix for the vulnerability requires both ends of the connection to
+ cooperate, the below changes will only take effect when the remote end is
+ OpenSSH >= 9.6 (or equivalent, such as Paramiko in server mode, as of
+ this patch version) and configured to use the new "strict kex" mode.
+ Paramiko will always attempt to use "strict kex" mode if offered by the
+ server, unless you override this by specifying ``strict_kex=False`` in
+ `Transport.__init__`.
+ - Paramiko will now raise an `SSHException` subclass (`MessageOrderError`)
+ when protocol messages are received in unexpected order. This includes
+ situations like receiving ``MSG_DEBUG`` or ``MSG_IGNORE`` during initial
+ key exchange, which are no longer allowed during strict mode.
+ - Key (re)negotiation -- i.e. ``MSG_NEWKEYS``, whenever it is encountered
+ -- now resets packet sequence numbers. (This should be invisible to users
+ during normal operation, only causing exceptions if the exploit is
+ encountered, which will usually result in, again, `MessageOrderError`.)
+ - Sequence number rollover will now raise `SSHException` if it occurs
+ during initial key exchange (regardless of strict mode status).
+
+ Thanks to Fabian Bäumer, Marcus Brinkmann, and Jörg Schwenk for submitting
+ details on the CVE prior to release.
+
+- :bug:`-` Tweak ``ext-info-(c|s)`` detection during KEXINIT protocol phase;
+ the original implementation made assumptions based on an OpenSSH
+ implementation detail.
+- :release:`3.3.1 <2023-07-28>`
+- :bug:`-` Cleaned up some very old root level files, mostly just to exercise
+ some of our doc build and release machinery. This changelog entry
+ intentionally left blank! ``nothing-to-see-here-move-along.gif``
+- :release:`3.3.0 <2023-07-28>`
+- :feature:`1907` (solves :issue:`1992`) Add support and tests for ``Match
+ final …`` (frequently used in ProxyJump configurations to exclude the jump
+ host) to our :ref:`SSH config parser <ssh-config-support>`. Patch by
+ ``@commonism``.
+- :feature:`2058` (solves :issue:`1587` and possibly others) Add an explicit
+ ``max_concurrent_prefetch_requests`` argument to
+  `paramiko.sftp_client.SFTPClient.get` and `paramiko.sftp_client.SFTPClient.getfo`,
+ allowing users to limit the number of concurrent requests used during
+ prefetch. Patch by ``@kschoelhorn``, with a test by ``@bwinston-sdp``.
+- :release:`3.2.0 <2023-05-25>`
+- :bug:`- major` Fixed a very sneaky bug found at the apparently
+ rarely-traveled intersection of ``RSA-SHA2`` keys, certificates, SSH agents,
+ and stricter-than-OpenSSH server targets. This manifested as yet another
+ "well, if we turn off SHA2 at one end or another, everything works again"
+ problem, for example with version 12 of the Teleport server endpoint.
+
+ This has been fixed; Paramiko tweaked multiple aspects of how it requests
+ agent signatures, and the agent appears to do the right thing now.
+
+ Thanks to Ryan Stoner for the bug report and testing.
+- :bug:`2012 major` (also :issue:`1961` and countless others) The
+ ``server-sig-algs`` and ``RSA-SHA2`` features added around Paramiko 2.9 or
+ so, had the annoying side effect of not working with servers that don't
+  so had the annoying side effect of not working with servers that don't
+ ``disabled_algorithms`` to forcibly disable the SHA2 algorithms on Paramiko's
+ end.
+
+ The **experimental** `~paramiko.transport.ServiceRequestingTransport` (noted
+ in its own entry in this changelog) includes a fix for this issue,
+ specifically by falling back to the same algorithm as the in-use pubkey if
+ it's in the algorithm list (leaving the "first algorithm in said list" as an
+ absolute final fallback).
+- :feature:`-` Implement ``_fields()`` on `~paramiko.agent.AgentKey` so that it
+ may be compared (via ``==``) with other `~paramiko.pkey.PKey` instances.
+- :bug:`23 major` Since its inception, Paramiko has (for reasons lost to time)
+ implemented authentication as a side effect of handling affirmative replies
+ to ``MSG_SERVICE_REQUEST`` protocol messages. What this means is Paramiko
+ makes one such request before every ``MSG_USERAUTH_REQUEST``, i.e. every auth
+ attempt.
+
+ OpenSSH doesn't care if clients send multiple service requests, but other
+ server implementations are often stricter in what they accept after an
+ initial service request (due to the RFCs not being clear). This can result in
+ odd behavior when a user doesn't authenticate successfully on the very first
+ try (for example, when the right key for a target host is the third in one's
+ ssh-agent).
+
+ This version of Paramiko now contains an opt-in
+ `~paramiko.transport.Transport` subclass,
+ `~paramiko.transport.ServiceRequestingTransport`, which more-correctly
+ implements service request handling in the Transport, and uses an
+ auth-handler subclass internally which has been similarly adapted. Users
+ wanting to try this new experimental code path may hand this class to
+ `SSHClient.connect <paramiko.client.SSHClient.connect>` as its
+ ``transport_factory`` kwarg.
+
+ .. warning::
+ This feature is **EXPERIMENTAL** and its code may be subject to change.
+
+ In addition:
+ - minor backwards incompatible changes exist in the new code paths,
+ most notably the removal of the (inconsistently applied and rarely
+ used) ``event`` arguments to the ``auth_xxx`` methods.
+ - GSSAPI support has only been partially implemented, and is untested.
+
+ .. note::
+ Some minor backwards-*compatible* changes were made to the **existing**
+ Transport and AuthHandler classes to facilitate the new code. For
+ example, ``Transport._handler_table`` and
+ ``AuthHandler._client_handler_table`` are now properties instead of raw
+ attributes.
+
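+  A non-authoritative sketch of opting in (host, username, and key path are
+  placeholders)::
+
+      from paramiko import SSHClient
+      from paramiko.transport import ServiceRequestingTransport
+
+      client = SSHClient()
+      client.load_system_host_keys()
+      client.connect(
+          "ssh.example.com",
+          username="alice",
+          key_filename="/home/alice/.ssh/id_ed25519",
+          transport_factory=ServiceRequestingTransport,
+      )
+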
+- :feature:`387` Users of `~paramiko.client.SSHClient` can now configure the
+ authentication logic Paramiko uses when connecting to servers; this
+ functionality is intended for advanced users and higher-level libraries such
+ as `Fabric <https://fabfile.org>`_. See `~paramiko.auth_strategy` for
+ details.
+
+ Fabric's co-temporal release includes a proof-of-concept use of this feature,
+ implementing an auth flow much closer to that of the OpenSSH client (versus
+ Paramiko's legacy behavior). It is **strongly recommended** that if this
+ interests you, investigate replacing any direct use of ``SSHClient`` with
+ Fabric's ``Connection``.
+
+ .. warning::
+ This feature is **EXPERIMENTAL**; please see its docs for details.
+
+- :feature:`-` Enhanced `~paramiko.agent.AgentKey` with new attributes, such
+ as:
+
+ - Added a ``comment`` attribute (and constructor argument);
+ `Agent.get_keys() <paramiko.agent.Agent.get_keys>` now uses this kwarg to
+ store any comment field sent over by the agent. The original version of
+ the agent feature inexplicably did not store the comment anywhere.
+ - Agent-derived keys now attempt to instantiate a copy of the appropriate
+ key class for access to other algorithm-specific members (eg key size).
+ This is available as the ``.inner_key`` attribute.
+
+ .. note::
+ This functionality is now in use in Fabric's new ``--list-agent-keys``
+ feature, as well as in Paramiko's debug logging.
+
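+  An illustrative sketch (assumes a reachable SSH agent holding at least one
+  key)::
+
+      from paramiko.agent import Agent
+
+      for key in Agent().get_keys():
+          # ``comment`` and ``inner_key`` are the new attributes noted above.
+          print(key.comment, key.fingerprint, type(key.inner_key))
+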
+- :feature:`-` `~paramiko.pkey.PKey` now offers convenience
+ "meta-constructors", static methods that simplify the process of
+ instantiating the correct subclass for a given key input.
+
+ For example, `PKey.from_path <paramiko.pkey.PKey.from_path>` can load a file
+ path without knowing *a priori* what type of key it is (thanks to some handy
+ methods within our cryptography dependency). Going forwards, we expect this
+ to be the primary method of loading keys by user code that runs on "human
+ time" (i.e. where some minor efficiencies are worth the convenience).
+
+ In addition, `PKey.from_type_string <paramiko.pkey.PKey.from_type_string>`
+ now exists, and is being used in some internals to load ssh-agent keys.
+
+ As part of these changes, `~paramiko.pkey.PKey` and friends grew an
+ `~paramiko.pkey.PKey.identifiers` classmethod; this is inspired by the
+ `~paramiko.ecdsakey.ECDSAKey.supported_key_format_identifiers` classmethod
+  (which now refers to the new method). This also includes adding a ``.name``
+  attribute to most key classes (which will eventually replace
+  ``.get_name()``).
+
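+  A hedged sketch of the path-based constructor (the key path is a
+  placeholder and assumes an unencrypted key)::
+
+      from paramiko.pkey import PKey
+
+      key = PKey.from_path("/home/alice/.ssh/id_ed25519")
+      print(key.algorithm_name, key.fingerprint)
+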
+- :feature:`-` `~paramiko.pkey.PKey` grew a new ``.algorithm_name`` property
+ which displays the key algorithm; this is typically derived from the value of
+ `~paramiko.pkey.PKey.get_name`. For example, ED25519 keys have a ``get_name``
+  of ``ssh-ed25519`` (the SSH protocol key type field value), and now have an
+  ``algorithm_name`` of ``ED25519``.
+- :feature:`-` `~paramiko.pkey.PKey` grew a new ``.fingerprint`` property which
+ emits a fingerprint string matching the SHA256+Base64 values printed by
+ various OpenSSH tooling (eg ``ssh-add -l``, ``ssh -v``). This is intended to
+ help troubleshoot Paramiko-vs-OpenSSH behavior and will eventually replace
+ the venerable ``get_fingerprint`` method.
+- :bug:`- major` `~paramiko.agent.AgentKey` had a dangling Python 3
+ incompatible ``__str__`` method returning bytes. This method has been
+ removed, allowing the superclass' (`~paramiko.pkey.PKey`) method to run
+ instead.
+- :release:`3.1.0 <2023-03-10>`
+- :feature:`2013` (solving :issue:`2009`, plus others) Add an explicit
+ ``channel_timeout`` keyword argument to `paramiko.client.SSHClient.connect`,
+ allowing users to configure the previously-hardcoded default value of 3600
+ seconds. Thanks to ``@VakarisZ`` and ``@ilija-lazoroski`` for the report and
+ patch, with credit to Mike Salvatore for patch review.
+- :feature:`2173` Accept single tabs as field separators (in addition to
+  single spaces) in `HostKeyEntry.from_line <paramiko.hostkeys.HostKeyEntry.from_line>` for parity
+ with OpenSSH's KnownHosts parser. Patched by Alex Chavkin.
+- :support:`2178 backported` Apply ``codespell`` to the codebase, which found a
+ lot of very old minor spelling mistakes in docstrings. Also modernize many
+ instances of ``*largs`` vs ``*args`` and ``**kwarg`` vs ``**kwargs``. Patch
+ courtesy of Yaroslav Halchenko, with review from Brian Skinn.
+- :release:`3.0.0 <2023-01-20>`
+- :bug:`2110 major` Remove some unnecessary ``__repr__`` calls when handling
+ bytes-vs-str conversions. This was apparently doing a lot of unintentional
+ data processing, which adds up in some use cases -- such as SFTP transfers,
+ which may now be significantly faster. Kudos to Shuhua Zhong for catch &
+ patch.
+- :bug:`2165 major` Streamline some redundant (and costly) byte conversion
+ calls in the packetizer and the core SFTP module. This should lead to some
+ SFTP speedups at the very least. Thanks to Alex Gaynor for the patch.
+- :support:`-` ``paramiko.util.retry_on_signal`` (and any internal uses of
+ same, and also any internal retries of ``EINTR`` on eg socket operations) has
+ been removed. As of Python 3.5, per `PEP 475
+ <https://peps.python.org/pep-0475/>`_, this functionality (and retrying
+ ``EINTR`` generally) is now part of the standard library.
+
+ .. warning::
+ This change is backwards incompatible if you were explicitly
+ importing/using this particular function. The observable behavior otherwise
+ should not be changing.
+
+- :support:`732` (also re: :issue:`630`) `~paramiko.config.SSHConfig` used to
+ straight-up delete the ``proxycommand`` key from config lookup results when
+ the source config said ``ProxyCommand none``. This has been altered to
+ preserve the key and give it the Python value ``None``, thus making the
+ Python representation more in line with the source config file.
+
+ .. warning::
+ This change is backwards incompatible if you were relying on the old (1.x,
+ 2.x) behavior for some reason (eg assuming all ``proxycommand`` values were
+ valid subcommand strings).
+
+- :support:`-` The private-key-writing methods of key classes (ie anything
+  inheriting from `~paramiko.pkey.PKey`) used to perform a manual, extra
+  ``chmod`` call after writing. This hasn't been strictly
+ necessary since the mid 2.x release line (when key writing started giving the
+ ``mode`` argument to `os.open`), and has now been removed entirely.
+
+ This should only be observable if you were mocking Paramiko's system calls
+ during your own testing, or similar.
+- :support:`-` ``PKey.__cmp__`` has been removed. Ordering-oriented comparison
+ of key files is unlikely to have ever made sense (the old implementation
+ attempted to order by the hashes of the key material) and so we have not
+ bothered setting up ``__lt__`` and friends at this time. The class continues
+ to have its original ``__eq__`` untouched.
+
+ .. warning::
+ This change is backwards incompatible if you were actually trying to sort
+ public key objects (directly or indirectly). Please file bug reports
+ detailing your use case if you have some intractable need for this
+ behavior, and we'll consider adding back the necessary Python 3 magic
+ methods so that it works as before.
+
+- :bug:`- major` A handful of lower-level classes (notably
+ `paramiko.message.Message` and `paramiko.pkey.PKey`) previously returned
+ `bytes` objects from their implementation of ``__str__``, even under Python
+ 3; and there was never any ``__bytes__`` method.
+
+ These issues have been fixed by renaming ``__str__`` to ``__bytes__`` and
+ relying on Python's default "stringification returns the output of
+ ``__repr__``" behavior re: any real attempts to ``str()`` such objects.
+- :support:`-` ``paramiko.common.asbytes`` has been moved to
+ ``paramiko.util.asbytes``.
+
+ .. warning::
+ This change is backwards incompatible if you were directly using this
+ function (which is unlikely).
+
+- :support:`-` Remove the now irrelevant ``paramiko.py3compat`` module.
+
+ .. warning::
+ This change is backwards incompatible. Such references should be
+ search-and-replaced with their modern Python 3.6+ equivalents; in some
+ cases, still-useful methods or values have been moved to ``paramiko.util``
+ (most) or ``paramiko.common`` (``byte_*``).
+
+- :support:`-` Drop support for Python versions less than 3.6, including Python
+ 2. So long and thanks for all the fish!
+
+ .. warning::
+ This change is backwards incompatible. However, our packaging metadata has
+ been updated to include ``python_requires``, so this should not cause
+ breakage unless you're on an old installation method that can't read this
+ metadata.
+
+ .. note::
+ As part of this change, our dependencies have been updated; eg we now
+ require Cryptography>=3.3, up from 2.5.
+
+- :release:`2.12.0 <2022-11-04>`
+- :feature:`2125` (also re: :issue:`2054`) Add a ``transport_factory`` kwarg to
+ `SSHClient.connect <paramiko.client.SSHClient.connect>` for advanced
+ users to gain more control over early Transport setup and manipulation.
+ Thanks to Noah Pederson for the patch.
+- :release:`2.11.1 <2022-11-04>`
+- :release:`2.10.6 <2022-11-04>`
+- :bug:`1822` (via, and relating to, far too many other issues to mention here)
+ Update `~paramiko.client.SSHClient` so it explicitly closes its wrapped
+ socket object upon encountering socket errors at connection time. This should
+ help somewhat with certain classes of memory leaks, resource warnings, and/or
+ errors (though we hasten to remind everyone that Client and Transport have
+ their own ``.close()`` methods for use in non-error situations!). Patch
+ courtesy of ``@YoavCohen``.
+- :bug:`1637` (via :issue:`1599`) Raise `~paramiko.ssh_exception.SSHException`
+ explicitly when blank private key data is loaded, instead of the natural
+ result of ``IndexError``. This should help more bits of Paramiko or
+ Paramiko-adjacent codebases to correctly handle this class of error. Credit:
+ Nicholas Dietz.
+- :release:`2.11.0 <2022-05-16>`
+- :release:`2.10.5 <2022-05-16>`
+- :release:`2.9.5 <2022-05-16>`
+- :bug:`1933` Align signature verification algorithm with OpenSSH re:
+ zero-padding signatures which don't match their nominal size/length. This
+ shouldn't affect most users, but will help Paramiko-implemented SSH servers
+ handle poorly behaved clients such as PuTTY. Thanks to Jun Omae for catch &
+ patch.
+- :bug:`2017` OpenSSH 7.7 and older has a bug preventing it from understanding
+ how to perform SHA2 signature verification for RSA certificates (specifically
+ certs - not keys), so when we added SHA2 support it broke all clients using
+ RSA certificates with these servers. This has been fixed in a manner similar
+ to what OpenSSH's own client does: a version check is performed and the
+ algorithm used is downgraded if needed. Reported by Adarsh Chauhan, with fix
+ suggested by Jun Omae.
+- :support:`2038` (via :issue:`2039`) Recent versions of Cryptography have
+ deprecated Blowfish algorithm support; in lieu of an easy method for users to
+ remove it from the list of algorithms Paramiko tries to import and use, we've
+ decided to remove it from our "preferred algorithms" list. This will both
+ discourage use of a weak algorithm, and avoid warnings. Credit for
+ report/patch goes to Mike Roest.
+- :bug:`2008` (via :issue:`2010`) Windows-native SSH agent support as merged in
+ 2.10 could encounter ``Errno 22`` ``OSError`` exceptions in some scenarios
+ (eg server not cleanly closing a relevant named pipe). This has been worked
+ around and should be less problematic. Reported by Danilo Campana Fuchs and
+ patched by Jun Omae.
+- :release:`2.10.4 <2022-04-25>`
+- :release:`2.9.4 <2022-04-25>`
+- :support:`1838 backported` (via :issue:`1870`/:issue:`2028`) Update
+ ``camelCase`` method calls against the ``threading`` module to be
+ ``snake_case``; this and related tweaks should fix some deprecation warnings
+ under Python 3.10. Thanks to Karthikeyan Singaravelan for the report,
+ ``@Narendra-Neerukonda`` for the patch, and to Thomas Grainger and Jun Omae
+ for patch workshopping.
+- :feature:`1951` Add SSH config token expansion (eg ``%h``, ``%p``) when
+ parsing ``ProxyJump`` directives. Patch courtesy of Bruno Inec.
+- :bug:`1964` (via :issue:`2024` as also reported in :issue:`2023`)
+ `~paramiko.pkey.PKey` instances' ``__eq__`` did not have the usual safety
+ guard in place to ensure they were being compared to another ``PKey`` object,
+ causing occasional spurious ``BadHostKeyException`` (among other things).
+ This has been fixed. Thanks to Shengdun Hua for the original report/patch and
+ to Christopher Papke for the final version of the fix.
+- :support:`2004` (via :issue:`2011`) Apply unittest ``skipIf`` to tests
+ currently using SHA1 in their critical path, to avoid failures on systems
+ starting to disable SHA1 outright in their crypto backends (eg RHEL 9).
+ Report & patch via Paul Howarth.
+- :bug:`2035` Servers offering certificate variants of hostkey algorithms (eg
+ ``ssh-rsa-cert-v01@openssh.com``) could not have their host keys verified by
+ Paramiko clients, as it only ever considered non-cert key types for that part
+ of connection handshaking. This has been fixed.
+- :release:`2.10.3 <2022-03-18>`
+- :release:`2.9.3 <2022-03-18>`
+- :bug:`1963` (via :issue:`1977`) Certificate-based pubkey auth was
+ inadvertently broken when adding SHA2 support; this has been fixed. Reported
+ by Erik Forsberg and fixed by Jun Omae.
+- :bug:`2002` (via :issue:`2003`) Switch from module-global to thread-local
+ storage when recording thread IDs for a logging helper; this should avoid one
+ flavor of memory leak for long-running processes. Catch & patch via Richard
+ Kojedzinszky.
+- :release:`2.10.2 <2022-03-14>`
+- :bug:`2001` Fix Python 2 compatibility breakage introduced in 2.10.1. Spotted
+ by Christian Hammond.
+
+ .. warning::
+ This is almost certainly the last time we will fix Python 2 related
+ errors! Please see `the roadmap
+ <https://bitprophet.org/projects/#roadmap>`_.
+
+- :release:`2.10.1 <2022-03-11>`
+- :bug:`- (2.10+)` (`CVE-2022-24302
+ <https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24302>`_) Creation
+ of new private key files using `~paramiko.pkey.PKey` subclasses was subject
+ to a race condition between file creation & mode modification, which could be
+ exploited by an attacker with knowledge of where the Paramiko-using code
+ would write out such files.
+
+ This has been patched by using `os.open` and `os.fdopen` to ensure new files
+ are opened with the correct mode immediately. We've left the subsequent
+ explicit ``chmod`` in place to minimize any possible disruption, though it
+ may get removed in future backwards-incompatible updates.
+
+ Thanks to Jan Schejbal for the report & feedback on the solution, and to
+ Jeremy Katz at Tidelift for coordinating the disclosure.
+- :release:`2.10.0 <2022-03-11>`
+- :feature:`1976` Add support for the ``%C`` token when parsing SSH config
+ files. Foundational PR submitted by ``@jbrand42``.
+- :feature:`1509` (via :issue:`1868`, :issue:`1837`) Add support for OpenSSH's
+ Windows agent as a fallback when Putty/WinPageant isn't available or
+ functional. Reported by ``@benj56`` with patches/PRs from ``@lewgordon`` and
+ Patrick Spendrin.
+- :bug:`892 major` Significantly speed up low-level read/write actions on
+ `~paramiko.sftp_file.SFTPFile` objects by using `bytearray`/`memoryview`.
+ This is unlikely to change anything for users of the higher level methods
+ like `SFTPClient.get <paramiko.sftp_client.SFTPClient.get>` or
+ `SFTPClient.getfo <paramiko.sftp_client.SFTPClient.getfo>`, but users of
+ `SFTPClient.open <paramiko.sftp_client.SFTPClient.open>` will likely see
+ orders of magnitude improvements for files larger than a few megabytes in
+ size.
+
+ Thanks to ``@jkji`` for the original report and to Sevastian Tchernov for the
+ patch.
+- :support:`1985` Add ``six`` explicitly to install-requires; it snuck into
+ active use at some point but has only been indicated by transitive dependency
+ on ``bcrypt`` until they somewhat-recently dropped it. This will be
+ short-lived until we `drop Python 2
+ support <https://bitprophet.org/projects/#roadmap>`_. Thanks to Sondre
+ Lillebø Gundersen for catch & patch.
+- :release:`2.9.2 <2022-01-08>`
+- :bug:`-` Connecting to servers which support ``server-sig-algs`` but which
+ have no overlap between that list and what a Paramiko client supports, now
+ raise an exception instead of defaulting to ``rsa-sha2-512`` (since the use
+ of ``server-sig-algs`` allows us to know what the server supports).
+- :bug:`-` Enhanced log output when connecting to servers that do not support
+ ``server-sig-algs`` extensions, making the new-as-of-2.9 defaulting to SHA2
+ pubkey algorithms more obvious when it kicks in.
+- :release:`2.9.1 <2021-12-24>`
+- :bug:`1955` Server-side support for ``rsa-sha2-256`` and ``ssh-rsa`` wasn't
+ fully operable after 2.9.0's release (signatures for RSA pubkeys were always
+ run through ``rsa-sha2-512`` instead). Report and early stab at a fix
+ courtesy of Jun Omae.
+- :release:`2.9.0 <2021-12-23>`
+- :feature:`1643` (also :issue:`1925`, :issue:`1644`, :issue:`1326`) Add
+ support for SHA-2 variants of RSA key verification algorithms (as described
+ in :rfc:`8332`) as well as limited SSH extension negotiation (:rfc:`8308`).
+
+ .. warning::
+ This change is slightly backwards incompatible, insofar as action is
+ required if your target systems do not support either RSA2 or the
+ ``server-sig-algs`` protocol extension.
+
+ Specifically, you need to specify ``disabled_algorithms={'keys':
+ ['rsa-sha2-256', 'rsa-sha2-512']}`` in either `SSHClient
+ <paramiko.client.SSHClient.__init__>` or `Transport
+ <paramiko.transport.Transport.__init__>`. See below for details on why.
+
+ How SSH servers/clients decide when and how to use this functionality can be
+ complicated; Paramiko's support is as follows:
+
+ - Client verification of server host key during key exchange will now prefer
+ ``rsa-sha2-512``, ``rsa-sha2-256``, and legacy ``ssh-rsa`` algorithms, in
+ that order, instead of just ``ssh-rsa``.
+
+ - Note that the preference order of other algorithm families such as
+ ``ed25519`` and ``ecdsa`` has not changed; for example, those two
+ groups are still preferred over RSA.
+
+ - Server mode will now offer all 3 RSA algorithms for host key verification
+ during key exchange, similar to client mode, if it has been configured with
+ an RSA host key.
+ - Client mode key exchange now sends the ``ext-info-c`` flag signaling
+ support for ``MSG_EXT_INFO``, and support for parsing the latter
+ (specifically, its ``server-sig-algs`` flag) has been added.
+ - Client mode, when performing public key authentication with an RSA key or
+ cert, will act as follows:
+
+ - In all cases, the list of algorithms to consider is based on the new
+ ``preferred_pubkeys`` list (see below) and ``disabled_algorithms``
+ (specifically, its ``pubkeys`` key); this list, like with host keys,
+ prefers SHA2-512, SHA2-256 and SHA1, in that order.
+ - When the server does not send ``server-sig-algs``, Paramiko will attempt
+ the first algorithm in the above list. Clients connecting to legacy
+ servers should thus use ``disabled_algorithms`` to turn off SHA2.
+ - When the server does send ``server-sig-algs``, the first algorithm
+ supported by both ends is used, or if there is none, it falls back to the
+ previous behavior.
+
+ - SSH agent support grew the ability to specify algorithm flags when
+ requesting private key signatures; this is now used to forward SHA2
+ algorithms when appropriate.
+ - Server mode is now capable of pubkey auth involving SHA-2 signatures from
+ clients, provided one's server implementation actually provides for doing
+ so.
+
+ - This includes basic support for sending ``MSG_EXT_INFO`` (containing
+ ``server-sig-algs`` only) to clients advertising ``ext-info-c`` in their
+ key exchange list.
+
+ In order to implement the above, the following API additions were made:
+
+ - `PKey.sign_ssh_data <paramiko.pkey.PKey>`: Grew an extra, optional
+ ``algorithm`` keyword argument (defaulting to ``None`` for most subclasses,
+ and to ``"ssh-rsa"`` for `~paramiko.rsakey.RSAKey`).
+ - A new `~paramiko.ssh_exception.SSHException` subclass was added,
+ `~paramiko.ssh_exception.IncompatiblePeer`, and is raised in all spots
+ where key exchange aborts due to algorithmic incompatibility.
+
+ - Like all other exceptions in that module, it inherits from
+ ``SSHException``, and as we did not change anything else about the
+ raising (i.e. the attributes and message text are the same) this change
+ is backwards compatible.
+
+ - `~paramiko.transport.Transport` grew a ``_preferred_pubkeys`` attribute and
+ matching ``preferred_pubkeys`` property to match the other, kex-focused,
+ such members. This allows client pubkey authentication to honor the
+ ``disabled_algorithms`` feature.
+
+ Thanks to Krisztián Kovács for the report and an early stab at a patch, as
+ well as the numerous users who submitted feedback on the issue, including but
+ not limited to: Christopher Rabotin, Sam Bull, and Manfred Kaiser.
+
+- :release:`2.8.1 <2021-11-28>`
+- :bug:`985` (via :issue:`992`) Fix listdir failure when server uses a locale.
+ Now on Python 2.7 `SFTPAttributes <paramiko.sftp_attr.SFTPAttributes>` will
+ decode abbreviated month names correctly rather than raise
+  ``UnicodeDecodeError``. Patch courtesy of Martin Packman.
+- :bug:`1024` Deleting items from `~paramiko.hostkeys.HostKeys` would
+ incorrectly raise `KeyError` even for valid keys, due to a logic bug. This
+ has been fixed. Report & patch credit: Jia Zhang.
+- :bug:`1257` (also :issue:`1266`) Update RSA and ECDSA key decoding
+ subroutines to correctly catch exception types thrown by modern
+ versions of Cryptography (specifically ``TypeError`` and
+ its internal ``UnsupportedAlgorithm``). These exception classes will now
+ become `~paramiko.ssh_exception.SSHException` instances instead of bubbling
+ up. Thanks to Ignat Semenov for the report and ``@tylergarcianet`` for an
+ early patch.
+- :bug:`-` (also :issue:`908`) Update `~paramiko.pkey.PKey` and subclasses to
+ compare (``__eq__``) via direct field/attribute comparison instead of hashing
+ (while retaining the existing behavior of ``__hash__`` via a slight
+ refactor). Big thanks to Josh Snyder and Jun Omae for the reports, and to
+ Josh Snyder for reproduction details & patch.
+
+ .. warning::
+ This fixes a security flaw! If you are running Paramiko on 32-bit systems
+ with low entropy (such as any 32-bit Python 2, or a 32-bit Python 3 which
+ is running with ``PYTHONHASHSEED=0``) it is possible for an attacker to
+ craft a new keypair from an exfiltrated public key, which Paramiko would
+ consider equal to the original key.
+
+ This could enable attacks such as, but not limited to, the following:
+
+ - Paramiko server processes would incorrectly authenticate the attacker
+ (using their generated private key) as if they were the victim. We see
+ this as the most plausible attack using this flaw.
+ - Paramiko client processes would incorrectly validate a connected server
+ (when host key verification is enabled) while subjected
+ to a man-in-the-middle attack. This impacts more users than the
+ server-side version, but also carries higher requirements for the
+ attacker, namely successful DNS poisoning or other MITM techniques.
+
+- :release:`2.8.0 <2021-10-09>`
+- :support:`-` Administrivia overhaul, including but not limited to:
+
+ - Migrate CI to CircleCI
+ - Primary dev branch is now ``main`` (renamed)
+ - Many README edits for clarity, modernization etc; including a bunch more
+ (and consistent) status badges & unification with main project site index
+ - PyPI page much more fleshed out (long_description is now filled in with the
+ README; sidebar links expanded; etc)
+ - flake8, pytest configs split out of setup.cfg into their own files
+ - Invoke/invocations (used by maintainers/contributors) upgraded to modern
+ versions
+
+- :bug:`1462 major` (via :issue:`1882`) Newer server-side key exchange
+ algorithms not intended to use SHA1 (``diffie-hellman-group14-sha256``,
+ ``diffie-hellman-group16-sha512``) were incorrectly using SHA1 after all, due
+ to a bug causing them to ignore the ``hash_algo`` class attribute. This has
+ been corrected. Big thanks to ``@miverson`` for the report and to Benno Rice
+ for the patch.
+- :feature:`1846` Add a ``prefetch`` keyword argument to `SFTPClient.get <paramiko.sftp_client.SFTPClient.get>`/`SFTPClient.getfo <paramiko.sftp_client.SFTPClient.getfo>`
+ so users who need to skip SFTP prefetching are able to conditionally turn it
+ off. Thanks to Github user ``@h3ll0r`` for the PR.
+- :release:`2.7.2 <2020-08-30>`
+- :support:`- backported` Update our CI to catch issues with sdist generation,
+ installation and testing.
+- :support:`1727 backported` Add missing test suite fixtures directory to
+ MANIFEST.in, reinstating the ability to run Paramiko's tests from an sdist
+ tarball. Thanks to Sandro Tosi for reporting the issue and to Blazej Michalik
+ for the PR.
+- :support:`1722 backported` Remove leading whitespace from OpenSSH RSA test
+ suite static key fixture, to conform better to spec. Credit: Alex Gaynor.
+- :bug:`-` Fix incorrect string formatting causing unhelpful error message
+ annotation when using Kerberos/GSSAPI. (Thanks, newer version of flake8!)
+- :bug:`1723` Fix incorrectly swapped order of ``p`` and ``q`` numbers when
+ loading OpenSSH-format RSA private keys. At minimum this should address a
+ slowdown when using such keys, and it also means Paramiko works with
+ Cryptography 3.1 and above (which complains strenuously when this problem
+ appears). Thanks to Alex Gaynor for the patch.
+- :release:`2.7.1 <2019-12-09>`
+- :bug:`1567` The new-style private key format (added in 2.7) suffered from an
+ unpadding bug which had been fixed earlier for Ed25519 (as that key type has
+ always used the newer format). That fix has been refactored and applied to
+ the base key class, courtesy of Pierce Lopez.
+- :bug:`1565` (via :issue:`1566`) Fix a bug in support for ECDSA keys under the
+ newly supported OpenSSH key format. Thanks to Pierce Lopez for the patch.
+- :release:`2.7.0 <2019-12-03>`
+- :feature:`602` (via :issue:`1343`, :issue:`1313`, :issue:`618`) Implement
+ support for OpenSSH 6.5-style private key files (typically denoted as having
+ ``BEGIN OPENSSH PRIVATE KEY`` headers instead of PEM format's ``BEGIN RSA
+ PRIVATE KEY`` or similar). If you were getting any sort of weird auth error
+ from "modern" keys generated on newer operating system releases (such as
+ macOS Mojave), this is the first update to try.
+
+ Major thanks to everyone who contributed or tested versions of the patch,
+ including but not limited to: Kevin Abel, Michiel Tiller, Pierce Lopez, and
+ Jared Hobbs.
+- :bug:`- major` ``ssh_config`` :ref:`token expansion <TOKENS>` used a
+ different method of determining the local username (``$USER`` env var),
+ compared to what the (much older) client connection code does
+ (``getpass.getuser``, which includes ``$USER`` but may check other variables
+ first, and is generally much more comprehensive). Both modules now use
+ ``getpass.getuser``.
+- :feature:`-` A couple of outright `~paramiko.config.SSHConfig` parse errors
+ were previously represented as vanilla ``Exception`` instances; as part of
+ recent feature work a more specific exception class,
+ `~paramiko.ssh_exception.ConfigParseError`, has been created. It is now also
+ used in those older spots, which is naturally backwards compatible.
+- :feature:`717` Implement support for the ``Match`` keyword in ``ssh_config``
+ files. Previously, this keyword was simply ignored & keywords inside such
+ blocks were treated as if they were part of the previous block. Thanks to
+ Michael Leinartas for the initial patchset.
+
+ .. note::
+ This feature adds a new :doc:`optional install dependency </installing>`,
+ `Invoke <https://www.pyinvoke.org>`_, for managing ``Match exec``
+ subprocesses.
+
+- :support:`-` Additional :doc:`installation </installing>` ``extras_require``
+ "flavors" (``ed25519``, ``invoke``, and ``all``) have been added to
+ our packaging metadata; see the install docs for details.
+- :bug:`- major` Paramiko's use of ``subprocess`` for ``ProxyCommand`` support
+ is conditionally imported to prevent issues on limited interpreter platforms
+ like Google Compute Engine. However, any resulting ``ImportError`` was lost
+ instead of preserved for raising (in the rare cases where a user tried
+ leveraging ``ProxyCommand`` in such an environment). This has been fixed.
+- :bug:`- major` Perform deduplication of ``IdentityFile`` contents during
+ ``ssh_config`` parsing; previously, if your config would result in the same
+ value being encountered more than once, ``IdentityFile`` would contain that
+ many copies of the same string.
+- :feature:`897` Implement most 'canonical hostname' ``ssh_config``
+ functionality (``CanonicalizeHostname``, ``CanonicalDomains``,
+ ``CanonicalizeFallbackLocal``, and ``CanonicalizeMaxDots``;
+ ``CanonicalizePermittedCNAMEs`` has **not** yet been implemented). All were
+ previously silently ignored. Reported by Michael Leinartas.
+- :support:`-` Explicitly document :ref:`which ssh_config features we
+ currently support <ssh-config-support>`. Previously users just had to guess,
+ which is simply no good.
+- :feature:`-` Add new convenience classmethod constructors to
+ `~paramiko.config.SSHConfig`: `~paramiko.config.SSHConfig.from_text`,
+ `~paramiko.config.SSHConfig.from_file`, and
+ `~paramiko.config.SSHConfig.from_path`. No more annoying two-step process!
+- :release:`2.6.0 <2019-06-23>`
+- :feature:`1463` Add a new keyword argument to `SSHClient.connect
+ <paramiko.client.SSHClient.connect>` and `~paramiko.transport.Transport`,
+ ``disabled_algorithms``, which allows selectively disabling one or more
+ kex/key/cipher/etc algorithms. This can be useful when disabling algorithms
+ your target server (or client) does not support cleanly, or to work around
+ unpatched bugs in Paramiko's own implementation thereof.
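+
+  A hedged sketch of passing this kwarg through `SSHClient.connect
+  <paramiko.client.SSHClient.connect>` (host and username are placeholders;
+  the ``pubkeys`` category is the one discussed in the 2.9 entries above)::
+
+      from paramiko import SSHClient
+
+      client = SSHClient()
+      client.load_system_host_keys()
+      client.connect(
+          "legacy.example.com",
+          username="alice",
+          disabled_algorithms={"pubkeys": ["rsa-sha2-512", "rsa-sha2-256"]},
+      )
+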
+- :release:`2.5.1 <2019-06-23>`
+- :release:`2.4.3 <2019-06-23>`
+- :bug:`1306` (via :issue:`1400`) Fix Ed25519 key handling so certain key
+ comment lengths don't cause ``SSHException("Invalid key")`` (this was
+ technically a bug in how padding, or lack thereof, is
+ calculated/interpreted). Thanks to ``@parke`` for the bug report & Pierce
+ Lopez for the patch.
+- :support:`1440` (with initial fixes via :issue:`1460`) Tweak many exception
+ classes so their string representations are more human-friendly; this also
+ includes incidental changes to some ``super()`` calls.
+
+ The definitions of exceptions' ``__init__`` methods have *not* changed, nor
+ have any log messages been altered, so this should be backwards compatible
+ for everything except the actual exceptions' ``__str__()`` outputs.
+
+ Thanks to Fabian Büchler for original report & Pierce Lopez for the
+ foundational patch.
+- :support:`1311` (for :issue:`584`, replacing :issue:`1166`) Add
+ backwards-compatible support for the ``gssapi`` GSSAPI library, as the
+ previous backend (``python-gssapi``) has since become defunct. This change
+ also includes tests for the GSSAPI functionality.
+
+ Big thanks to Anselm Kruis for the patch and to Sebastian Deiß (author of our
+ initial GSSAPI functionality) for review.
+
+ .. note::
+ This feature also adds ``setup.py`` 'extras' support for installing
+ Paramiko as ``paramiko[gssapi]``, which pulls in the optional
+ dependencies you had to get by hand previously.
+
+ .. note::
+ To be very clear, this patch **does not** remove support for the older
+ ``python-gssapi`` library. We *may* remove that support in a later release,
+ but for now, either library will work. Please upgrade to ``gssapi`` when
+ you can, however, as ``python-gssapi`` is no longer maintained upstream.
+
+- :bug:`322 major` `SSHClient.exec_command
+ <paramiko.client.SSHClient.exec_command>` previously returned a naive
+ `~paramiko.channel.ChannelFile` object for its ``stdin`` value; such objects
+ don't know to properly shut down the remote end's stdin when they
+  ``.close()``. This led to issues (such as hangs) when running remote
+ commands that read from stdin.
+
+ A new subclass, `~paramiko.channel.ChannelStdinFile`, has been created which
+ closes remote stdin when it itself is closed.
+ `~paramiko.client.SSHClient.exec_command` has been updated to use that class
+ for its ``stdin`` return value.
+
+ Thanks to Brandon Rhodes for the report & steps to reproduce.
+- :release:`2.5.0 <2019-06-09>`
+- :feature:`1233` (also :issue:`1229`, :issue:`1332`) Add support for
+ encrypt-then-MAC (ETM) schemes (``hmac-sha2-256-etm@openssh.com``,
+ ``hmac-sha2-512-etm@openssh.com``) and two newer Diffie-Hellman group key
+ exchange algorithms (``group14``, using SHA256; and ``group16``, using
+ SHA512). Patch courtesy of Edgar Sousa.
+- :feature:`532` (via :issue:`1384` and :issue:`1258`) Add support for
+ Curve25519 key exchange (aka ``curve25519-sha256@libssh.org``). Thanks to
+ Alex Gaynor and Dan Fuhry for supplying patches.
+- :support:`1379` (also :issue:`1369`) Raise Cryptography dependency
+ requirement to version 2.5 (from 1.5) and update some deprecated uses of its
+ API.
+
+ This removes a bunch of warnings of the style
+ ``CryptographyDeprecationWarning: encode_point has been deprecated on
+ EllipticCurvePublicNumbers and will be removed in a future version. Please
+ use EllipticCurvePublicKey.public_bytes to obtain both compressed and
+ uncompressed point encoding`` and similar, which users who had eventually
+ upgraded to Cryptography 2.x would encounter.
+
+ .. warning::
+ This change is backwards incompatible **if** you are unable to upgrade your
+ version of Cryptography. Please see `Cryptography's own changelog
+ <https://cryptography.io/en/latest/changelog/>`_ for details on what may
+ change when you upgrade; for the most part the only changes involved
+ dropping older Python versions (such as 2.6, 3.3, or some PyPy editions)
+ which Paramiko itself has already dropped.
+
+- :support:`1378 backported` Add support for the modern (as of Python 3.3)
+ import location of ``MutableMapping`` (used in host key management) to avoid
+ the old location becoming deprecated in Python 3.8. Thanks to Josh Karpel for
+ catch & patch.
+- :release:`2.4.2 <2018-09-18>`
+- :release:`2.3.3 <2018-09-18>`
+- :release:`2.2.4 <2018-09-18>`
+- :release:`2.1.6 <2018-09-18>`
+- :release:`2.0.9 <2018-09-18>`
+- :bug:`-` Modify protocol message handling such that ``Transport`` does not
+ respond to ``MSG_UNIMPLEMENTED`` with its own ``MSG_UNIMPLEMENTED``. This
+ behavior probably didn't cause any outright errors, but it doesn't seem to
+ conform to the RFCs and could cause (non-infinite) feedback loops in some
+ scenarios (usually those involving Paramiko on both ends).
+- :bug:`1283` Fix exploit (CVE-2018-1000805) in Paramiko's server mode (**not**
+ client mode) where hostile clients could trick the server into thinking they
+ were authenticated without actually submitting valid authentication.
+
+ Specifically, steps have been taken to start separating client and server
+ related message types in the message handling tables within ``Transport`` and
+ ``AuthHandler``; this work is not complete but enough has been performed to
+ close off this particular exploit (which was the only obvious such exploit
+ for this particular channel).
+
+ Thanks to Daniel Hoffman for the detailed report.
+- :support:`1292 backported (<2.4)` Backport changes from :issue:`979` (added
+ in Paramiko
+ 2.3) to Paramiko 2.0-2.2, using duck-typing to preserve backwards
+ compatibility. This allows these older versions to use newer Cryptography
+ sign/verify APIs when available, without requiring them (as is the case with
+ Paramiko 2.3+).
+
+ Practically speaking, this change prevents spamming of
+ ``CryptographyDeprecationWarning`` notices which pop up in the above scenario
+ (older Paramiko, newer Cryptography).
+
+ .. note::
+ This is a no-op for Paramiko 2.3+, which have required newer Cryptography
+ releases since they were released.
+
+- :support:`1291 backported (<2.4)` Backport pytest support and application of
+ the ``black`` code formatter (both of which previously only existed in the
+ 2.4 branch and above) to everything 2.0 and newer. This makes back/forward
+ porting bugfixes significantly easier.
+- :support:`1262 backported` Add ``*.pub`` files to the MANIFEST so distributed
+ source packages contain some necessary test assets. Credit: Alexander
+ Kapshuna.
+- :feature:`1212` Updated `SSHConfig.lookup <paramiko.config.SSHConfig.lookup>`
+ so it returns a new, type-casting-friendly dict subclass
+ (`~paramiko.config.SSHConfigDict`) in lieu of dict literals. This ought to be
+ backwards compatible, and allows an easier way to check boolean or int type
+ ``ssh_config`` values. Thanks to Chris Rose for the patch.
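+
+ A rough illustration (config path and hostname are hypothetical, and the
+ looked-up options are assumed to be present in the matching ``Host`` block)::
+
+     import paramiko
+
+     config = paramiko.SSHConfig()
+     with open("/home/user/.ssh/config") as f:
+         config.parse(f)
+     opts = config.lookup("example.com")  # an SSHConfigDict
+     # Typed helpers instead of hand-parsing "yes"/"no" or digit strings
+     agent = opts.as_bool("forwardagent")
+     port = opts.as_int("port")
+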
+- :support:`1191` Update our install docs with (somewhat) recently added
+ additional dependencies; we previously only required Cryptography, but the
+ docs never got updated after we incurred ``bcrypt`` and ``pynacl``
+ requirements for Ed25519 key support.
+
+ Additionally, ``pyasn1`` was never actually hard-required; it was necessary
+ during a development branch, and is used by the optional GSSAPI support, but
+ is not required for regular installation. Thus, it has been removed from our
+ ``setup.py`` and its imports in the GSSAPI code made optional.
+
+ Credit to ``@stevenwinfield`` for highlighting the outdated install docs.
+- :release:`2.4.1 <2018-03-12>`
+- :release:`2.3.2 <2018-03-12>`
+- :release:`2.2.3 <2018-03-12>`
+- :release:`2.1.5 <2018-03-12>`
+- :release:`2.0.8 <2018-03-12>`
+- :release:`1.18.5 <2018-03-12>`
+- :release:`1.17.6 <2018-03-12>`
+- :bug:`1175 (1.17+)` Fix a security flaw (CVE-2018-7750) in Paramiko's server
+ mode (emphasis on **server** mode; this does **not** impact *client* use!)
+ where authentication status was not checked before processing channel-open
+ and other requests typically only sent after authenticating. Big thanks to
+ Matthijs Kooijman for the report.
+- :bug:`1168` Add newer key classes for Ed25519 and ECDSA to
+ ``paramiko.__all__`` so that code introspecting that attribute, or using
+ ``from paramiko import *`` (such as some IDEs) sees them. Thanks to
+ ``@patriksevallius`` for the patch.
+- :bug:`1039` Ed25519 auth key decryption raised an unexpected exception when
+ given a unicode password string (typical in python 3). Report by Theodor van
+ Nahl and fix by Pierce Lopez.
+- :release:`2.4.0 <2017-11-14>`
+- :feature:`-` Add a new ``passphrase`` kwarg to `SSHClient.connect
+ <paramiko.client.SSHClient.connect>` so users may disambiguate key-decryption
+ passphrases from password-auth passwords. (This is a backwards compatible
+ change; ``password`` will still pull double duty as a passphrase when
+ ``passphrase`` is not given.)
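+
+ For example (host, user, and paths below are placeholders)::
+
+     import paramiko
+
+     client = paramiko.SSHClient()
+     client.load_system_host_keys()
+     client.connect(
+         "example.com",
+         username="user",
+         key_filename="/home/user/.ssh/id_rsa",
+         # Decrypts the key only; no password auth is implied by this kwarg.
+         passphrase="sekrit key passphrase",
+     )
+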
+- :support:`-` Update ``tearDown`` of client test suite to avoid hangs due to
+ eternally blocking ``accept()`` calls on the internal server thread (which
+ can occur when test code raises an exception before actually connecting to
+ the server.)
+- :bug:`1108 (1.17+)` Rename a private method keyword argument (which was named
+ ``async``) so that we're compatible with the upcoming Python 3.7 release
+ (where ``async`` is a new keyword.) Thanks to ``@vEpiphyte`` for the report.
+- :support:`1100` Updated the test suite & related docs/metadata/config to be
+ compatible with pytest instead of using the old, custom, crufty
+ unittest-based ``test.py``.
+
+ This includes marking known-slow tests (mostly the SFTP ones) so they can be
+ filtered out by ``inv test``'s default behavior; as well as other minor
+ tweaks to test collection and/or display (for example, GSSAPI tests are
+ collected, but skipped, instead of not even being collected by default as in
+ ``test.py``.)
+- :support:`- backported` Include LICENSE file in wheel archives.
+- :support:`1070` Drop Python 2.6 and Python 3.3 support; now only 2.7 and 3.4+
+ are supported. If you're unable to upgrade from 2.6 or 3.3, please stick to
+ the Paramiko 2.3.x (or below) release lines.
+- :release:`2.3.1 <2017-09-22>`
+- :bug:`1071` Certificate support broke the no-certificate case for Ed25519
+ keys (symptom is an ``AttributeError`` about ``public_blob``.) This went
+ uncaught due to cert autoload behavior (i.e. our test suite never actually
+ ran the no-cert case, because the cert existed!) Both issues have been fixed.
+ Thanks to John Hu for the report.
+- :release:`2.3.0 <2017-09-18>`
+- :release:`2.2.2 <2017-09-18>`
+- :release:`2.1.4 <2017-09-18>`
+- :release:`2.0.7 <2017-09-18>`
+- :release:`1.18.4 <2017-09-18>`
+- :bug:`1065` Add rekeying support to GSSAPI connections, which was erroneously
+ missing. Without this fix, any attempt to renegotiate the transport keys for
+ a ``gss-kex``-authed `~paramiko.transport.Transport` would cause a MIC
+ failure and terminate the connection. Thanks to Sebastian Deiß and Anselm
+ Kruis for the patch.
+- :feature:`1063` Add a ``gss_trust_dns`` option to ``Client`` and
+ ``Transport`` to allow explicitly setting whether or not DNS canonicalization
+ should occur when using GSSAPI. Thanks to Richard E. Silverman for the report
+ & Sebastian Deiß for initial patchset.
+- :bug:`1061` Clean up GSSAPI authentication procedures so they do not prevent
+ normal fallback to other authentication methods on failure. (In other words,
+ presence of GSSAPI functionality on a target server precluded use of *any*
+ other auth type if the user was unable to pass GSSAPI auth.) Patch via Anselm
+ Kruis.
+- :bug:`1060` Fix key exchange (kex) algorithm list for GSSAPI authentication;
+ previously, the list used solely out-of-date algorithms, and now contains
+ newer ones listed preferentially before the old. Credit: Anselm Kruis.
+- :bug:`1055 (1.17+)` (also :issue:`1056`, :issue:`1057`, :issue:`1058`,
+ :issue:`1059`) Fix up host-key checking in our GSSAPI support, which was
+ previously using an incorrect API call. Thanks to Anselm Kruis for the
+ patches.
+- :bug:`945 (1.18+)` (backport of :issue:`910` and re: :issue:`865`) SSHClient
+ now requests the type of host key it has (e.g. from known_hosts) and does not
+ consider a different type to be a "Missing" host key. This fixes a common
+ case where an ECDSA key is in known_hosts and the server also has an RSA host
+ key. Thanks to Pierce Lopez.
+- :support:`979` Update how we use `Cryptography <https://cryptography.io>`_'s
+ signature/verification methods so we aren't relying on a deprecated API.
+ Thanks to Paul Kehrer for the patch.
+
+ .. warning::
+ This bumps the minimum Cryptography version from 1.1 to 1.5. Such an
+ upgrade should be backwards compatible and easy to do. See `their changelog
+ <https://cryptography.io/en/latest/changelog/>`_ for additional details.
+- :support:`-` Ed25519 keys never got proper API documentation support; this
+ has been fixed.
+- :feature:`1026` Update `~paramiko.ed25519key.Ed25519Key` so its constructor
+ offers the same ``file_obj`` parameter as its sibling key classes. Credit:
+ Michal Kuffa.
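+
+ A short sketch (key path is a placeholder; pass ``password=...`` as well if
+ the key file is encrypted)::
+
+     import paramiko
+
+     with open("/home/user/.ssh/id_ed25519") as f:
+         key = paramiko.Ed25519Key(file_obj=f)
+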
+- :feature:`1013` Added pre-authentication banner support for the server
+ interface (`ServerInterface.get_banner
+ <paramiko.server.ServerInterface.get_banner>` plus related support in
+ ``Transport/AuthHandler``.) Patch courtesy of Dennis Kaarsemaker.
+- :bug:`60 major` (via :issue:`1037`) Paramiko originally defaulted to zlib
+ compression level 9 (when one connects with ``compression=True``; it defaults
+ to off.) This has been found to be quite wasteful and tends to cause much
+ longer transfers than necessary in most cases.
+
+ OpenSSH defaults to compression level 6, which is a much more reasonable
+ setting (nearly identical compression characteristics but noticeably,
+ sometimes significantly, faster transmission); Paramiko now uses this value
+ instead.
+
+ Thanks to Damien Dubé for the report and ``@DrNeutron`` for investigating &
+ submitting the patch.
+- :support:`-` Display exception type and message when logging auth-rejection
+ messages (ones reading ``Auth rejected: unsupported or mangled public key``);
+ previously this error case had a bare except and did not display exactly why
+ the key failed. It will now append info such as ``KeyError:
+ 'some-unknown-type-string'`` or similar.
+- :feature:`1042` (also partially :issue:`531`) Implement basic client-side
+ certificate authentication (as per the OpenSSH vendor extension.)
+
+ The core implementation is `PKey.load_certificate
+ <paramiko.pkey.PKey.load_certificate>` and its corresponding ``.public_blob``
+ attribute on key objects, which is honored in the auth and transport modules.
+ Additionally, `SSHClient.connect <paramiko.client.SSHClient.connect>` will
+ now automatically load certificate data alongside private key data when one
+ has appropriately-named cert files (e.g. ``id_rsa-cert.pub``) - see its
+ docstring for details.
+
+ Thanks to Jason Rigby for a first draft (:issue:`531`) and to Paul Kapp for
+ the second draft, upon which the current functionality has been based (with
+ modifications.)
+
+ .. note::
+ This support is client-focused; Paramiko-driven server code is capable of
+ handling cert-bearing pubkey auth packets, *but* it does not interpret any
+ cert-specific fields, so the end result is functionally identical to a
+ vanilla pubkey auth process (and thus requires e.g. prepopulated
+ authorized-keys data.) We expect full server-side cert support to follow
+ later.
+
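+ As a rough sketch of the manual route (paths and host are placeholders, and
+ this assumes ``load_certificate`` is given a certificate file path as
+ described in its docs; the ``SSHClient.connect`` auto-loading mentioned above
+ needs no extra code)::
+
+     import paramiko
+
+     key = paramiko.RSAKey.from_private_key_file("/home/user/.ssh/id_rsa")
+     # Attaches the cert as .public_blob, which auth/transport then honor.
+     key.load_certificate("/home/user/.ssh/id_rsa-cert.pub")
+
+     client = paramiko.SSHClient()
+     client.load_system_host_keys()
+     client.connect("example.com", username="user", pkey=key)
+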
+- :support:`1041` Modify logic around explicit disconnect
+ messages, and unknown-channel situations, so that they rely on centralized
+ shutdown code instead of running their own. This is at worst removing some
+ unnecessary code, and may help with some situations where Paramiko hangs at
+ the end of a session. Thanks to Paul Kapp for the patch.
+- :support:`1012` (via :issue:`1016`) Enhance documentation around the new
+ `SFTP.posix_rename <paramiko.sftp_client.SFTPClient.posix_rename>` method so
+ it's referenced in the 'standard' ``rename`` method for increased visibility.
+ Thanks to Marius Flage for the report.
+- :release:`2.2.1 <2017-06-13>`
+- :bug:`993` Ed25519 host keys were not comparable/hashable, causing an
+ exception if such a key existed in a ``known_hosts`` file. Thanks to Oleh
+ Prypin for the report and Pierce Lopez for the fix.
+- :bug:`990` The (added in 2.2.0) ``bcrypt`` dependency should have been on
+ version 3.1.3 or greater (was initially set to 3.0.0 or greater.) Thanks to
+ Paul Howarth for the report.
+- :release:`2.2.0 <2017-06-09>`
+- :release:`2.1.3 <2017-06-09>`
+- :release:`2.0.6 <2017-06-09>`
+- :release:`1.18.3 <2017-06-09>`
+- :release:`1.17.5 <2017-06-09>`
+- :bug:`865` SSHClient now requests the type of host key it has (e.g. from
+ known_hosts) and does not consider a different type to be a "Missing" host
+ key. This fixes a common case where an ECDSA key is in known_hosts and the
+ server also has an RSA host key. Thanks to Pierce Lopez.
+- :support:`906 (1.18+)` Clean up a handful of outdated imports and related
+ tweaks. Thanks to Pierce Lopez.
+- :bug:`984` Enhance default cipher preference order such that
+ ``aes(192|256)-cbc`` are preferred over ``blowfish-cbc``. Thanks to Alex
+ Gaynor.
+- :bug:`971 (1.17+)` Allow any type implementing the buffer API to be used with
+ `BufferedFile <paramiko.file.BufferedFile>`, `Channel
+ <paramiko.channel.Channel>`, and `SFTPFile <paramiko.sftp_file.SFTPFile>`.
+ This resolves a regression introduced in 1.13 with the Python 3 porting
+ changes, when using types such as ``memoryview``. Credit: Martin Packman.
+- :bug:`741` (also :issue:`809`, :issue:`772`; all via :issue:`912`) Writing
+ encrypted/password-protected private key files was silently broken since 2.0
+ due to an incorrect API call; this has been fixed.
+
+ Includes a directly related fix, namely adding the ability to read
+ ``AES-256-CBC`` ciphered private keys (which is now what we tend to write out
+ as it is Cryptography's default private key cipher.)
+
+ Thanks to ``@virlos`` for the original report, Chris Harris and ``@ibuler``
+ for initial draft PRs, and ``@jhgorrell`` for the final patch.
+- :feature:`65` (via :issue:`471`) Add support for OpenSSH's SFTP
+ ``posix-rename`` protocol extension (section 3.3 of `OpenSSH's protocol
+ extension document
+ <http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.31>`_),
+ via a new ``posix_rename`` method in `SFTPClient
+ <paramiko.sftp_client.SFTPClient.posix_rename>` and `SFTPServerInterface
+ <paramiko.sftp_si.SFTPServerInterface.posix_rename>`. Thanks to Wren Turkal
+ for the initial patch & Mika Pflüger for the enhanced, merged PR.
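+
+ A tiny example, given an already-connected `SSHClient
+ <paramiko.client.SSHClient>` named ``client`` (remote paths are
+ placeholders)::
+
+     sftp = client.open_sftp()
+     # Unlike the plain SFTP rename, posix-rename overwrites an existing
+     # target path, matching POSIX rename() semantics.
+     sftp.posix_rename("/tmp/report.draft", "/tmp/report.txt")
+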
+- :feature:`869` Add an ``auth_timeout`` kwarg to `SSHClient.connect
+ <paramiko.client.SSHClient.connect>` (default: 30s) to avoid hangs when the
+ remote end becomes unresponsive during the authentication step. Credit to
+ ``@timsavage``.
+
+ .. note::
+ This technically changes behavior, insofar as very slow auth steps >30s
+ will now cause timeout exceptions instead of completing. We doubt most
+ users will notice; those affected can simply give a higher value to
+ ``auth_timeout``.
+
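+ For example, given an ``SSHClient`` instance ``client`` (values are
+ illustrative)::
+
+     # Allow a slow-to-respond server up to 60s for the auth step.
+     client.connect("example.com", username="user", auth_timeout=60)
+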
+- :support:`921` Tighten up the ``__hash__`` implementation for various key
+ classes; less code is good code. Thanks to Francisco Couzo for the patch.
+- :support:`956 backported (1.17+)` Switch code coverage service from
+ coveralls.io to codecov.io (& then disable the latter's auto-comments.)
+ Thanks to Nikolai Røed Kristiansen for the patch.
+- :bug:`983` Move ``sha1`` above the now-arguably-broken ``md5`` in the list of
+ preferred MAC algorithms, as an incremental security improvement for users
+ whose target systems offer both. Credit: Pierce Lopez.
+- :bug:`667` The RC4/arcfour family of ciphers has been broken since version
+ 2.0; but since the algorithm is now known to be completely insecure, we are
+ opting to remove support outright instead of fixing it. Thanks to Alex Gaynor
+ for catch & patch.
+- :feature:`857` Allow `SSHClient.set_missing_host_key_policy
+ <paramiko.client.SSHClient.set_missing_host_key_policy>` to accept policy
+ classes *or* instances, instead of only instances, thus fixing a
+ long-standing gotcha for unaware users.
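+
+ Both of the following now work (minimal sketch)::
+
+     import paramiko
+
+     client = paramiko.SSHClient()
+     client.set_missing_host_key_policy(paramiko.AutoAddPolicy)    # class
+     client.set_missing_host_key_policy(paramiko.AutoAddPolicy())  # instance
+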
+- :feature:`951` Add support for ECDH key exchange (kex), specifically the
+ algorithms ``ecdh-sha2-nistp256``, ``ecdh-sha2-nistp384``, and
+ ``ecdh-sha2-nistp521``. They now come before the older ``diffie-hellman-*``
+ family of kex algorithms in the preferred-kex list. Thanks to Shashank
+ Veerapaneni for the patch & Pierce Lopez for a follow-up.
+- :support:`- backported` A big formatting pass to clean up an enormous number
+ of invalid Sphinx reference links, discovered by switching to a modern,
+ rigorous nitpicking doc-building mode.
+- :bug:`900` (via :issue:`911`) Prefer newer ``ecdsa-sha2-nistp`` keys over RSA
+ and DSA keys during host key selection. This improves compatibility with
+ OpenSSH, both in terms of general behavior, and also re: ability to properly
+ leverage OpenSSH-modified ``known_hosts`` files. Credit: ``@kasdoe`` for
+ original report/PR and Pierce Lopez for the second draft.
+- :bug:`794` (via :issue:`981`) Prior support for ``ecdsa-sha2-nistp(384|521)``
+ algorithms didn't fully extend to covering host keys, preventing connection
+ to hosts which only offer these key types and no others. This is now fixed.
+ Thanks to ``@ncoult`` and ``@kasdoe`` for reports and Pierce Lopez for the
+ patch.
+- :feature:`325` (via :issue:`972`) Add Ed25519 support, for both host keys
+ and user authentication. Big thanks to Alex Gaynor for the patch.
+
+ .. note::
+ This change adds the ``bcrypt`` and ``pynacl`` Python libraries as
+ dependencies. No C-level dependencies beyond those previously required (for
+ Cryptography) have been added.
+
+- :support:`974 backported` Overhaul the codebase to be PEP-8, etc, compliant
+ (i.e. passes the maintainer's preferred `flake8 <http://flake8.pycqa.org/>`_
+ configuration) and add a ``flake8`` step to the Travis config. Big thanks to
+ Dorian Pula!
+- :bug:`949 (1.17+)` SSHClient and Transport could cause a memory leak if
+ there's a connection problem or protocol error, even if ``Transport.close()``
+ is called. Thanks Kyle Agronick for the discovery and investigation, and
+ Pierce Lopez for assistance.
+- :bug:`683 (1.17+)` Make ``util.log_to_file`` append instead of replace.
+ Thanks to ``@vlcinsky`` for the report.
+- :release:`2.1.2 <2017-02-20>`
+- :release:`2.0.5 <2017-02-20>`
+- :release:`1.18.2 <2017-02-20>`
+- :release:`1.17.4 <2017-02-20>`
+- :bug:`853 (1.17+)` Tweak how `RSAKey.__str__ <paramiko.rsakey.RSAKey>`
+ behaves so it doesn't cause ``TypeError`` under Python 3. Thanks to Francisco
+ Couzo for the report.
+- :bug:`862 (1.17+)` (via :issue:`863`) Avoid test suite exceptions on
+ platforms lacking ``errno.ETIME`` (which seems to be some FreeBSD and some
+ Windows environments.) Thanks to Sofian Brabez.
+- :bug:`44 (1.17+)` (via :issue:`891`) `SSHClient <paramiko.client.SSHClient>`
+ now gives its internal `Transport <paramiko.transport.Transport>` a handle on
+ itself, preventing garbage collection of the client until the session is
+ closed. Without this, some code which returns stream or transport objects
+ without the client that generated them, would result in premature session
+ closure when the client was GCd. Credit: ``@w31rd0`` for original report,
+ Omer Anson for the patch.
+- :bug:`713 (<2.0)` (via :issue:`714` and :issue:`889`) Don't pass
+ initialization vectors to PyCrypto when dealing with counter-mode ciphers;
+ newer PyCrypto versions throw an exception otherwise (older ones simply
+ ignored this parameter altogether). Thanks to ``@jmh045000`` for report &
+ patches.
+- :bug:`895 (1.17+)` Fix a bug in server-mode concerning multiple interactive
+ auth steps (which were incorrectly responded to). Thanks to Dennis
+ Kaarsemaker for catch & patch.
+- :support:`866 backported (1.17+)` (also :issue:`838`) Remove an old
+ test-related file we don't support, and add PyPy to Travis-CI config. Thanks
+ to Pierce Lopez for the final patch and Pedro Rodrigues for an earlier
+ edition.
+- :release:`2.1.1 <2016-12-12>`
+- :release:`2.0.4 <2016-12-12>`
+- :release:`1.18.1 <2016-12-12>`
+- :bug:`859 (1.18+)` (via :issue:`860`) A tweak to the original patch
+ implementing :issue:`398` was not fully applied, causing calls to
+ `~paramiko.client.SSHClient.invoke_shell` to fail with ``AttributeError``.
+ This has been fixed. Patch credit: Kirk Byers.
+- :bug:`-` Accidentally merged the new features from 1.18.0 into the
+ 2.0.x bugfix-only branch. This included merging a bug in one of those new
+ features (breaking `~paramiko.client.SSHClient.invoke_shell` with an
+ ``AttributeError``.) The offending code has been stripped out of the 2.0.x
+ line (but of course, remains in 2.1.x and above.)
+- :bug:`859` (via :issue:`860`) A tweak to the original patch implementing
+ :issue:`398` was not fully applied, causing calls to
+ `~paramiko.client.SSHClient.invoke_shell` to fail with ``AttributeError``.
+ This has been fixed. Patch credit: Kirk Byers.
+- :release:`2.1.0 <2016-12-09>`
+- :release:`2.0.3 <2016-12-09>`
+- :release:`1.18.0 <2016-12-09>`
+- :release:`1.17.3 <2016-12-09>`
+- :bug:`802 (1.17+)` (via :issue:`804`) Update our vendored Windows API module
+ to address errors of the form ``AttributeError: 'module' object has no
+ attribute 'c_ssize_t'``. Credit to Jason R. Coombs.
+- :bug:`824 (1.17+)` Fix the implementation of ``PKey.write_private_key_file``
+ (this method is only publicly defined on subclasses; the fix was in the
+ private real implementation) so it passes the correct params to ``open()``.
+ This bug apparently went unnoticed and unfixed for 12 entire years. Congrats
+ to John Villalovos for noticing & submitting the patch!
+- :support:`801 backported (1.17+)` Skip a Unix-only test when on Windows;
+ thanks to Gabi Davar.
+- :support:`792 backported (1.17+)` Minor updates to the README and demos;
+ thanks to Alan Yee.
+- :feature:`780 (1.18+)` (also :issue:`779`, and may help users affected by
+ :issue:`520`) Add an optional ``timeout`` parameter to
+ `Transport.start_client <paramiko.transport.Transport.start_client>` (and
+ feed it the value of the configured connection timeout when used within
+ `SSHClient <paramiko.client.SSHClient>`.) This helps prevent situations where
+ network connectivity isn't timing out, but the remote server is otherwise
+ unable to service the connection in a timely manner. Credit to
+ ``@sanseihappa``.
+- :bug:`742` (also re: :issue:`559`) Catch ``AssertionError`` thrown by
+ Cryptography when attempting to load bad ECDSA keys, turning it into an
+ ``SSHException``. This moves the behavior in line with other "bad keys"
+ situations, re: Paramiko's main auth loop. Thanks to MengHuan Yu for the
+ patch.
+- :bug:`789 (1.17+)` Add a missing ``.closed`` attribute (plus ``._closed``
+ because reasons) to `ProxyCommand <paramiko.proxy.ProxyCommand>` so the
+ earlier partial fix for :issue:`520` works in situations where one is
+ gatewaying via ``ProxyCommand``.
+- :bug:`334 (1.17+)` Make the ``subprocess`` import in ``proxy.py`` lazy so
+ users on platforms without it (such as Google App Engine) can import Paramiko
+ successfully. (Relatedly, make it easier to tweak an active socket check
+ timeout [in `Transport <paramiko.transport.Transport>`] which was previously
+ hardcoded.) Credit: Shinya Okano.
+- :support:`854 backported (1.17+)` Fix incorrect docstring/param-list for
+ `Transport.auth_gssapi_keyex
+ <paramiko.transport.Transport.auth_gssapi_keyex>` so it matches the real
+ signature. Caught by ``@Score_Under``.
+- :bug:`681 (1.17+)` Fix a Python3-specific bug re: the handling of read
+ buffers when using ``ProxyCommand``. Thanks to Paul Kapp for catch & patch.
+- :feature:`398 (1.18+)` Add an ``environment`` dict argument to
+ `Client.exec_command <paramiko.client.SSHClient.exec_command>` (plus the
+ lower level `Channel.update_environment
+ <paramiko.channel.Channel.update_environment>` and
+ `Channel.set_environment_variable
+ <paramiko.channel.Channel.set_environment_variable>` methods) which
+ implements the ``env`` SSH message type. This means the remote shell
+ environment can be set without the use of ``VARNAME=value`` shell tricks,
+ provided the server's ``AcceptEnv`` lists the variables you need to set.
+ Thanks to Philip Lorenz for the pull request.
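+
+ A rough example, given a connected ``SSHClient`` named ``client`` (the
+ variable name and value are placeholders, and the server must list the
+ variable in ``AcceptEnv`` as noted above)::
+
+     stdin, stdout, stderr = client.exec_command(
+         "echo $GREETING",
+         environment={"GREETING": "hello from paramiko"},
+     )
+     print(stdout.read().decode())
+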
+- :support:`819 backported (>=1.15,<2.0)` Document how lacking ``gmp`` headers
+ at install time can cause a significant performance hit if you build PyCrypto
+ from source. (Most system-distributed packages already have this enabled.)
+- :release:`2.0.2 <2016-07-25>`
+- :release:`1.17.2 <2016-07-25>`
+- :release:`1.16.3 <2016-07-25>`
+- :bug:`673 (1.16+)` (via :issue:`681`) Fix protocol banner read errors
+ (``SSHException``) which would occasionally pop up when using
+ ``ProxyCommand`` gatewaying. Thanks to ``@Depado`` for the initial report and
+ Paul Kapp for the fix.
+- :bug:`774 (1.16+)` Add a ``_closed`` private attribute to
+ `~paramiko.channel.Channel` objects so that they continue functioning when
+ used as proxy sockets under Python 3 (e.g. as ``direct-tcpip`` gateways for
+ other Paramiko connections.)
+- :bug:`758 (1.16+)` Apply type definitions to ``_winapi`` module from
+ `jaraco.windows <https://github.com/jaraco/jaraco.windows>`_ 3.6.1. This
+ should address issues on Windows platforms that often result in errors like
+ ``ArgumentError: [...] int too long to convert``. Thanks to ``@swohlerLL``
+ for the report and Jason R. Coombs for the patch.
+- :release:`2.0.1 <2016-06-21>`
+- :release:`1.17.1 <2016-06-21>`
+- :release:`1.16.2 <2016-06-21>`
+- :bug:`520 (1.16+)` (Partial fix) Fix at least one instance of race condition
+ driven threading hangs at end of the Python interpreter session. (Includes a
+ docs update as well - always make sure to ``.close()`` your clients!)
+- :bug:`537 (1.16+)` Fix a bug in `BufferedPipe.set_event
+ <paramiko.buffered_pipe.BufferedPipe.set_event>` which could cause
+ deadlocks/hangs when one uses `select.select` against
+ `~paramiko.channel.Channel` objects (or otherwise calls `Channel.fileno
+ <paramiko.channel.Channel.fileno>` after the channel has closed). Thanks to
+ Przemysław Strzelczak for the report & reproduction case, and to Krzysztof
+ Rusek for the fix.
+- :release:`2.0.0 <2016-04-28>`
+- :release:`1.17.0 <2016-04-28>`
+- :release:`1.16.1 <2016-04-28>`
+- :release:`1.15.5 <2016-04-28>`
+- :feature:`731` (working off the earlier :issue:`611`) Add support for 384-
+ and 512-bit elliptic curve groups in ECDSA key types (aka
+ ``ecdsa-sha2-nistp384`` / ``ecdsa-sha2-nistp521``). Thanks to Michiel Tiller
+ and ``@CrazyCasta`` for the patches.
+- :bug:`670` Due to an earlier bugfix, less-specific ``Host`` blocks'
+ ``ProxyCommand`` values were overriding ``ProxyCommand none`` in
+ more-specific ``Host`` blocks. This has been fixed in a backwards compatible
+ manner (i.e. ``ProxyCommand none`` continues to appear as a total lack of any
+ ``proxycommand`` key in parsed config structures). Thanks to Pat Brisbin for
+ the catch.
+- :bug:`676` (via :issue:`677`) Fix a backwards incompatibility issue that
+ cropped up in `SFTPFile.prefetch <paramiko.sftp_file.SFTPFile.prefetch>` re:
+ the erroneously non-optional ``file_size`` parameter. Should only affect
+ users who manually call ``prefetch``. Thanks to ``@stevevanhooser`` for catch
+ & patch.
+- :feature:`394` Replace PyCrypto with the Python Cryptographic Authority
+ (PyCA) 'Cryptography' library suite. This improves security, installability,
+ and performance; adds PyPy support; and much more.
+
+ There aren't enough ways to thank Alex Gaynor for all of his work on this,
+ and then his patience while the maintainer let his PR grow moss for a year
+ and change. Paul Kehrer came in with an assist, and I think I saw Olle
+ Lundberg, ``@techtonik`` and ``@johnthagen`` supplying backup as well. Thanks
+ to all!
+
+ .. warning::
+ **This is a backwards incompatible change.**
+
+ However, **it should only affect installation** requirements; **no API
+ changes are intended or expected**. Please report any such breakages as
+ bugs.
+
+ See our updated :doc:`installation docs <installing>` for details on what
+ is now required to install Paramiko; many/most users should be able to
+ simply ``pip install -U paramiko`` (especially if you **upgrade to pip
+ 8**).
+
+- :bug:`577` (via :issue:`578`; should also fix :issue:`718`, :issue:`560`) Fix
+ stalled/hung SFTP downloads by cleaning up some threading lock issues. Thanks
+ to Stephen C. Pope for the patch.
+- :bug:`716` Fix a Python 3 compatibility issue when handling two-factor
+ authentication. Thanks to Mateusz Kowalski for the catch & original patch.
+- :support:`729 backported (>=1.15,<2.0)` Clean up ``setup.py`` to always use
+ ``setuptools``; not doing so was a historical artifact from bygone days.
+ Thanks to Alex Gaynor.
+- :bug:`649 major (==1.17)` Update the module in charge of handling SSH moduli
+ so it's consistent with OpenSSH behavior re: prime number selection. Thanks
+ to Damien Tournoud for catch & patch.
+- :bug:`617` (aka `fabric/fabric#1429
+ <https://github.com/fabric/fabric/issues/1429>`_; via :issue:`679`; related:
+ :issue:`678`, :issue:`685`, :issue:`615` & :issue:`616`) Fix up
+ `~paramiko.ssh_exception.NoValidConnectionsError` so it pickles correctly,
+ and fix a related Python 3 compatibility issue. Thanks to Rebecca Schlussel
+ for the report & Marius Gedminas for the patch.
+- :bug:`613` (via :issue:`619`) Update to ``jaraco.windows`` 3.4.1 to fix some
+ errors related to ``ctypes`` on Windows platforms. Credit to Jason R. Coombs.
+- :support:`621 backported (>=1.15,<2.0)` Annotate some public attributes on
+ `~paramiko.channel.Channel` such as ``.closed``. Thanks to Sergey Vasilyev
+ for the report.
+- :bug:`632` Fix logic bug in the SFTP client's callback-calling functionality;
+ previously there was a chance the given callback would fire twice at the end
+ of a transfer. Thanks to ``@ab9-er`` for catch & original patch.
+- :support:`612 backported (>=1.15,<2.0)` Identify & work around a race
+ condition in the test for handshake timeouts, which was causing frequent test
+ failures for a subset of contributors as well as Travis-CI (usually, but not
+ always, limited to Python 3.5). Props to Ed Kellett for assistance during
+ some of the troubleshooting.
+- :support:`697 backported (>=1.15,<2.0)` Remove whitespace in our
+ ``setup.py``'s ``install_requires`` as it triggers occasional bugs in some
+ versions of ``setuptools``. Thanks to Justin Lecher for catch & original
+ patch.
+- :bug:`499` Strip trailing/leading whitespace from lines when parsing SSH
+ config files - this brings things in line with OpenSSH behavior. Thanks to
+ Alfredo Esteban for the original report and Nick Pillitteri for the patch.
+- :bug:`652` Fix behavior of ``gssapi-with-mic`` auth requests so they fail
+ gracefully (allowing followup via other auth methods) instead of raising an
+ exception. Patch courtesy of ``@jamercee``.
+- :feature:`588 (==1.17)` Add missing file-like object methods for
+ `~paramiko.file.BufferedFile` and `~paramiko.sftp_file.SFTPFile`. Thanks to
+ Adam Meily for the patch.
+- :support:`636 backported (>=1.15,<2.0)` Clean up and enhance the README (and
+ rename it to ``README.rst`` from just ``README``). Thanks to ``@LucasRMehl``.
+- :release:`1.16.0 <2015-11-04>`
+- :bug:`194 major` (also :issue:`562`, :issue:`530`, :issue:`576`) Streamline
+ use of ``stat`` when downloading SFTP files via `SFTPClient.get
+ <paramiko.sftp_client.SFTPClient.get>`; this avoids triggering bugs in some
+ off-spec SFTP servers such as IBM Sterling. Thanks to ``@muraleee`` for the
+ initial report and to Torkil Gustavsen for the patch.
+- :feature:`467` (also :issue:`139`, :issue:`412`) Fully enable two-factor
+ authentication (e.g. when a server requires ``AuthenticationMethods
+ pubkey,keyboard-interactive``). Thanks to ``@perryjrandall`` for the patch
+ and to ``@nevins-b`` and Matt Robenolt for additional support.
+- :bug:`502 major` Fix 'exec' requests in server mode to use ``get_string``
+ instead of ``get_text`` to avoid ``UnicodeDecodeError`` on non-UTF-8 input.
+ Thanks to Anselm Kruis for the patch & discussion.
+- :bug:`401` Fix line number reporting in log output regarding invalid
+ ``known_hosts`` line entries. Thanks to Dylan Thacker-Smith for catch &
+ patch.
+- :support:`525 backported` Update the vendored Windows API addon to a more
+ recent edition. Also fixes :issue:`193`, :issue:`488`, :issue:`498`. Thanks
+ to Jason Coombs.
+- :release:`1.15.4 <2015-11-02>`
+- :release:`1.14.3 <2015-11-02>`
+- :release:`1.13.4 <2015-11-02>`
+- :bug:`366` Fix `~paramiko.sftp_attr.SFTPAttributes` so its string
+ representation doesn't raise exceptions on empty/initialized instances. Patch
+ by Ulrich Petri.
+- :bug:`359` Use correct attribute name when trying to use Python 3's
+ ``int.bit_length`` method; prior to fix, the Python 2 custom fallback
+ implementation was always used, even on Python 3. Thanks to Alex Gaynor.
+- :support:`594 backported` Correct some post-Python3-port docstrings to
+ specify ``bytes`` type instead of ``str``. Credit to ``@redixin``.
+- :bug:`565` Don't explode with ``IndexError`` when reading private key files
+ lacking an ``-----END <type> PRIVATE KEY-----`` footer. Patch courtesy of
+ Prasanna Santhanam.
+- :feature:`604` Add support for the ``aes192-ctr`` and ``aes192-cbc`` ciphers.
+ Thanks to Michiel Tiller for noticing it was as easy as tweaking some key
+ sizes :D
+- :feature:`356` (also :issue:`596`, :issue:`365`, :issue:`341`, :issue:`164`,
+ :issue:`581`, and a bunch of other duplicates besides) Add support for SHA-2
+ based key exchange (kex) algorithm ``diffie-hellman-group-exchange-sha256``
+ and (H)MAC algorithms ``hmac-sha2-256`` and ``hmac-sha2-512``.
+
+ This change includes tweaks to debug-level logging regarding
+ algorithm-selection handshakes; the old all-in-one log line is now multiple
+ easier-to-read, printed-at-handshake-time log lines.
+
+ Thanks to the many people who submitted patches for this functionality and/or
+ assisted in testing those patches. That list includes but is not limited to,
+ and in no particular order: Matthias Witte, Dag Wieers, Ash Berlin, Etienne
+ Perot, Gert van Dijk, ``@GuyShaanan``, Aaron Bieber, ``@cyphase``, and Eric
+ Brown.
+- :release:`1.15.3 <2015-10-02>`
+- :support:`554 backported` Fix inaccuracies in the docstring for the ECDSA key
+ class. Thanks to Jared Hance for the patch.
+- :support:`516 backported` Document `~paramiko.agent.AgentRequestHandler`.
+ Thanks to ``@toejough`` for report & suggestions.
+- :bug:`496 (1.15+)` Fix a handful of small but critical bugs in Paramiko's
+ GSSAPI support (note: this includes switching from PyCrypo's Random to
+ `os.urandom`). Thanks to Anselm Kruis for catch & patch.
+- :bug:`491` (combines :issue:`62` and :issue:`439`) Implement timeout
+ functionality to address hangs from dropped network connections and/or failed
+ handshakes. Credit to ``@vazir`` and ``@dacut`` for the original patches and
+ to Olle Lundberg for reimplementation.
+- :bug:`490` Skip invalid/unparsable lines in ``known_hosts`` files, instead
+ of raising `~paramiko.ssh_exception.SSHException`. This brings Paramiko's
+ behavior more in line with OpenSSH, which silently ignores such input. Catch
+ & patch courtesy of Martin Topholm.
+- :bug:`404` Print details when displaying
+ `~paramiko.ssh_exception.BadHostKeyException` objects (expected vs received
+ data) instead of just "hey shit broke". Patch credit: Loic Dachary.
+- :bug:`469` (also :issue:`488`, :issue:`461` and like a dozen others) Fix a
+ typo introduced in the 1.15 release which broke WinPageant support. Thanks to
+ everyone who submitted patches, and to Steve Cohen who was the lucky winner
+ of the cherry-pick lottery.
+- :bug:`353` (via :issue:`482`) Fix a bug introduced in the Python 3 port
+ which caused ``OverflowError`` (and other symptoms) in SFTP functionality.
+ Thanks to ``@dboreham`` for leading the troubleshooting charge, and to
+ Scott Maxwell for the final patch.
+- :support:`582` Fix some old ``setup.py`` related helper code which was
+ breaking ``bdist_dumb`` on Mac OS X. Thanks to Peter Odding for the patch.
+- :bug:`22 major` Try harder to connect to multiple network families (e.g. IPv4
+ vs IPv6) in case of connection issues; this helps with problems such as hosts
+ which resolve both IPv4 and IPv6 addresses but are only listening on IPv4.
+ Thanks to Dries Desmet for original report and Torsten Landschoff for the
+ foundational patchset.
+- :bug:`402` Check to see if an SSH agent is actually present before trying to
+ forward it to the remote end. This replaces what was usually a useless
+ ``TypeError`` with a human-readable
+ `~paramiko.ssh_exception.AuthenticationException`. Credit to Ken Jordan for
+ the fix and Yvan Marques for original report.
+- :release:`1.15.2 <2014-12-19>`
+- :release:`1.14.2 <2014-12-19>`
+- :release:`1.13.3 <2014-12-19>`
+- :bug:`413` (also :issue:`414`, :issue:`420`, :issue:`454`) Be significantly
+ smarter about polling & timing behavior when running proxy commands, to avoid
+ unnecessary (often 100%!) CPU usage. Major thanks to Jason Dunsmore for
+ report & initial patchset and to Chris Adams & John Morrissey for followup
+ improvements.
+- :bug:`455` Tweak packet size handling to conform better to the OpenSSH RFCs;
+ this helps address issues with interactive program cursors. Courtesy of Jeff
+ Quast.
+- :bug:`428` Fix an issue in `~paramiko.file.BufferedFile` (primarily used in
+ the SFTP modules) concerning incorrect behavior by
+ `~paramiko.file.BufferedFile.readlines` on files whose size exceeds the
+ buffer size. Thanks to ``@achapp`` for catch & patch.
+- :bug:`415` Fix ``ssh_config`` parsing to correctly interpret ``ProxyCommand
+ none`` as the lack of a proxy command, instead of as a literal command string
+ of ``"none"``. Thanks to Richard Spiers for the catch & Sean Johnson for the
+ fix.
+- :support:`431 backported` Replace handrolled ``ssh_config`` parsing code with
+ use of the ``shlex`` module. Thanks to Yan Kalchevskiy.
+- :support:`422 backported` Clean up some unused imports. Courtesy of Olle
+ Lundberg.
+- :support:`421 backported` Modernize threading calls to use newer API. Thanks
+ to Olle Lundberg.
+- :support:`419 backported` Modernize a bunch of the codebase internals to
+ leverage decorators. Props to ``@beckjake`` for realizing we're no longer on
+ Python 2.2 :D
+- :bug:`266` Change numbering of `~paramiko.transport.Transport` channels to
+ start at 0 instead of 1 for better compatibility with OpenSSH & certain
+ server implementations which break on 1-indexed channels. Thanks to
+ ``@egroeper`` for catch & patch.
+- :bug:`459` Tighten up agent connection closure behavior to avoid spurious
+ ``ResourceWarning`` display in some situations. Thanks to ``@tkrapp`` for the
+ catch.
+- :bug:`429` Server-level debug message logging was overlooked during the
+ Python 3 compatibility update; Python 3 clients attempting to log SSH debug
+ packets encountered type errors. This is now fixed. Thanks to ``@mjmaenpaa``
+ for the catch.
+- :bug:`320` Update our win_pageant module to be Python 3 compatible. Thanks to
+ ``@sherbang`` and ``@adamkerz`` for the patches.
+- :release:`1.15.1 <2014-09-22>`
+- :bug:`399` SSH agent forwarding (potentially other functionality as
+ well) would hang due to incorrect values passed into the new window size
+ arguments for `~paramiko.transport.Transport` (thanks to a botched merge).
+ This has been corrected. Thanks to Dylan Thacker-Smith for the report &
+ patch.
+- :feature:`167` Add `~paramiko.config.SSHConfig.get_hostnames` for easier
+ introspection of a loaded SSH config file or object. Courtesy of Søren
+ Løvborg.
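+
+ For instance (config path is a placeholder)::
+
+     import paramiko
+
+     config = paramiko.SSHConfig()
+     with open("/home/user/.ssh/config") as f:
+         config.parse(f)
+     # The set of Host patterns seen while parsing, e.g. {'*', 'example.com'}
+     print(config.get_hostnames())
+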
+- :release:`1.15.0 <2014-09-18>`
+- :support:`393` Replace internal use of PyCrypto's ``SHA.new`` with the
+ stdlib's ``hashlib.sha1``. Thanks to Alex Gaynor.
+- :feature:`267` (also :issue:`250`, :issue:`241`, :issue:`228`) Add GSS-API /
+ SSPI (e.g. Kerberos) key exchange and authentication support
+ (:ref:`installation docs here <gssapi>`). Mega thanks to Sebastian Deiß, with
+ assist by Torsten Landschoff.
+
+ .. note::
+ Unix users should be aware that the ``python-gssapi`` library (a
+ requirement for using this functionality) only appears to support
+ Python 2.7 and up at this time.
+
+- :bug:`346 major` Fix an issue in private key files' encryption salts that
+ could cause tracebacks and file corruption if keys were re-encrypted. Credit
+ to Xavier Nunn.
+- :feature:`362` Allow users to control the SSH banner timeout. Thanks to Cory
+ Benfield.
+- :feature:`372` Update default window & packet sizes to more closely adhere to
+ the pertinent RFC; also expose these settings in the public API so they may
+ be overridden by client code. This should address some general speed issues
+ such as :issue:`175`. Big thanks to Olle Lundberg for the update.
+- :bug:`373 major` Attempt to fix a handful of issues (such as :issue:`354`)
+ related to infinite loops and threading deadlocks. Thanks to Olle Lundberg as
+ well as a handful of community members who provided advice & feedback via
+ IRC.
+- :support:`374` (also :issue:`375`) Old code cleanup courtesy of Olle
+ Lundberg.
+- :support:`377` Factor `~paramiko.channel.Channel` openness sanity check into
+ a decorator. Thanks to Olle Lundberg for original patch.
+- :bug:`298 major` Don't perform point validation on ECDSA keys in
+ ``known_hosts`` files, since a) this can cause significant slowdown when such
+ keys exist, and b) ``known_hosts`` files are implicitly trustworthy. Thanks
+ to Kieran Spear for catch & patch.
+
+ .. note::
+ This change bumps up the version requirement for the ``ecdsa`` library to
+ ``0.11``.
+
+- :bug:`234 major` Lower logging levels for a few overly-noisy log messages
+ about secure channels. Thanks to David Pursehouse for noticing & contributing
+ the fix.
+- :feature:`218` Add support for ECDSA private keys on the client side. Thanks
+ to ``@aszlig`` for the patch.
+- :bug:`335 major` Fix ECDSA key generation (generation of brand new ECDSA keys
+ was broken previously). Thanks to ``@solarw`` for catch & patch.
+- :feature:`184` Support quoted values in SSH config file parsing. Credit to
+ Yan Kalchevskiy.
+- :feature:`131` Add a `~paramiko.sftp_client.SFTPClient.listdir_iter` method
+ to `~paramiko.sftp_client.SFTPClient` allowing for more efficient,
+ async/generator based file listings. Thanks to John Begeman.
+- :support:`378 backported` Minor code cleanup in the SSH config module
+ courtesy of Olle Lundberg.
+- :support:`249 backported` Consolidate version information into one spot.
+ Thanks to Gabi Davar for the reminder.
+- :release:`1.14.1 <2014-08-25>`
+- :release:`1.13.2 <2014-08-25>`
+- :bug:`376` Be less aggressive about expanding variables in ``ssh_config``
+ files, which results in a speedup of SSH config parsing. Credit to Olle
+ Lundberg.
+- :support:`324 backported` A bevvy of documentation typo fixes, courtesy of Roy
+ Wellington.
+- :bug:`312` `paramiko.transport.Transport` had a bug in its ``__repr__`` which
+ surfaces during errors encountered within its ``__init__``, causing
+ problematic tracebacks in such situations. Thanks to Simon Percivall for
+ catch & patch.
+- :bug:`272` Fix a bug where ``known_hosts`` parsing hashed the input hostname
+ as well as the hostnames from the ``known_hosts`` file, on every comparison.
+ Thanks to ``@sigmunau`` for final patch and ``@ostacey`` for the original
+ report.
+- :bug:`239` Add Windows-style CRLF support to SSH config file parsing. Props
+ to Christopher Swenson.
+- :support:`229 backported` Fix a couple of incorrectly-copied docstrings' ``..
+ versionadded::`` RST directives. Thanks to Aarni Koskela for the catch.
+- :support:`169 backported` Minor refactor of
+ `paramiko.sftp_client.SFTPClient.put` thanks to Abhinav Upadhyay.
+- :bug:`285` (also :issue:`352`) Update our Python 3 ``b()`` compatibility shim
+ to handle ``buffer`` objects correctly; this fixes a frequently reported
+ issue affecting many users, including users of the ``bzr`` software suite.
+ Thanks to ``@basictheprogram`` for the initial report, Jelmer Vernooij for
+ the fix and Andrew Starr-Bochicchio & Jeremy T. Bouse (among others) for
+ discussion & feedback.
+- :support:`371` Add Travis support & docs update for Python 3.4. Thanks to
+ Olle Lundberg.
+- :release:`1.14.0 <2014-05-07>`
+- :release:`1.13.1 <2014-05-07>`
+- :release:`1.12.4 <2014-05-07>`
+- :release:`1.11.6 <2014-05-07>`
+- :bug:`-` `paramiko.file.BufferedFile.read` incorrectly returned text strings
+ after the Python 3 migration, despite bytes being more appropriate for file
+ contents (which may be binary or of an unknown encoding.) This has been
+ addressed.
+
+ .. note::
+ `paramiko.file.BufferedFile.readline` continues to return strings, not
+ bytes, as "lines" only make sense for textual data. It assumes UTF-8 by
+ default.
+
+ This should fix `this issue raised on the Obnam mailing list
+ <http://comments.gmane.org/gmane.comp.sysutils.backup.obnam/252>`_. Thanks
+ to Antoine Brenner for the patch.
+- :bug:`-` Added self.args for exception classes. Used for unpickling. Related
+ to (`Fabric #986 <https://github.com/fabric/fabric/issues/986>`_, `Fabric
+ #714 <https://github.com/fabric/fabric/issues/714>`_). Thanks to Alex
+ Plugaru.
+- :bug:`-` Fix logging error in sftp_client for filenames containing the '%'
+ character. Thanks to Antoine Brenner.
+- :bug:`308` Fix regression in dsskey.py that caused sporadic signature
+ verification failures. Thanks to Chris Rose.
+- :support:`299` Use deterministic signatures for ECDSA keys for improved
+ security. Thanks to Alex Gaynor.
+- :support:`297` Replace PyCrypto's ``Random`` with `os.urandom` for improved
+ speed and security. Thanks again to Alex.
+- :support:`295` Swap out a bunch of PyCrypto hash functions with use of
+ `hashlib`. Thanks to Alex Gaynor.
+- :support:`290` (also :issue:`292`) Add support for building universal
+ (Python 2+3 compatible) wheel files during the release process. Courtesy of
+ Alex Gaynor.
+- :support:`284` Add Python language trove identifiers to ``setup.py``. Thanks
+ to Alex Gaynor for catch & patch.
+- :bug:`235` Improve string type testing in a handful of spots (e.g. ``s/if
+ type(x) is str/if isinstance(x, basestring)/g``.) Thanks to ``@ksamuel`` for
+ the report.
+- :release:`1.13.0 <2014-03-13>`
+- :release:`1.12.3 <2014-03-13>`
+- :release:`1.11.5 <2014-03-13>`
+- :release:`1.10.7 <2014-03-13>`
+- :feature:`16` **Python 3 support!** Our test suite passes under Python 3, and
+ it (& Fabric's test suite) continues to pass under Python 2. **Python 2.5 is
+ no longer supported with this change!**
+
+ The merged code was built on many contributors' efforts, both code &
+ feedback. In no particular order, we thank Daniel Goertzen, Ivan Kolodyazhny,
+ Tomi Pieviläinen, Jason R. Coombs, Jan N. Schulze, ``@Lazik``, Dorian Pula,
+ Scott Maxwell, Tshepang Lekhonkhobe, Aaron Meurer, and Dave Halter.
+- :support:`256 backported` Convert API documentation to Sphinx, yielding a new
+ API docs website to replace the old Epydoc one. Thanks to Olle Lundberg for
+ the initial conversion work.
+- :bug:`-` Use constant-time hash comparison operations where possible, to
+ protect against `timing-based attacks
+ <http://codahale.com/a-lesson-in-timing-attacks/>`_. Thanks to Alex Gaynor
+ for the patch.
+- :release:`1.12.2 <2014-02-14>`
+- :release:`1.11.4 <2014-02-14>`
+- :release:`1.10.6 <2014-02-14>`
+- :feature:`58` Allow client code to access the stored SSH server banner via
+ `Transport.get_banner <paramiko.transport.Transport.get_banner>`. Thanks to
+ ``@Jhoanor`` for the patch.
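+
+ For example, given a connected ``SSHClient`` named ``client`` (the banner may
+ be ``None`` if the server sent none, and is raw bytes otherwise)::
+
+     banner = client.get_transport().get_banner()
+     if banner:
+         print(banner.decode("utf-8", "replace"))
+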
+- :bug:`252` (`Fabric #1020 <https://github.com/fabric/fabric/issues/1020>`_)
+ Enhanced the implementation of ``ProxyCommand`` to avoid a deadlock/hang
+ condition that frequently occurs at ``Transport`` shutdown time. Thanks to
+ Mateusz Kobos, Matthijs van der Vleuten and Guillaume Zitta for the original
+ reports and to Marius Gedminas for helping test nontrivial use cases.
+- :bug:`268` Fix some missed renames of ``ProxyCommand`` related error classes.
+ Thanks to Marius Gedminas for catch & patch.
+- :bug:`34` (PR :issue:`35`) Fix SFTP prefetching incompatibility with some
+ SFTP servers regarding request/response ordering. Thanks to Richard
+ Kettlewell.
+- :bug:`193` (and its attendant PRs :issue:`230` & :issue:`253`) Fix SSH agent
+ problems present on Windows. Thanks to David Hobbs for initial report and to
+ Aarni Koskela & Olle Lundberg for the patches.
+- :release:`1.12.1 <2014-01-08>`
+- :release:`1.11.3 <2014-01-08>`
+- :release:`1.10.5 <2014-01-08>`
+- :bug:`225 (1.12+)` Note ecdsa requirement in README. Thanks to Amaury
+ Rodriguez for the catch.
+- :bug:`176` Fix AttributeError bugs in known_hosts file (re)loading. Thanks
+ to Nathan Scowcroft for the patch & Martin Blumenstingl for the initial test
+ case.
+- :release:`1.12.0 <2013-09-27>`
+- :release:`1.11.2 <2013-09-27>`
+- :release:`1.10.4 <2013-09-27>`
+- :feature:`152` Add tentative support for ECDSA keys. **This adds the ecdsa
+ module as a new dependency of Paramiko.** The module is available at
+ `warner/python-ecdsa on Github <https://github.com/warner/python-ecdsa>`_ and
+ `ecdsa on PyPI <https://pypi.python.org/pypi/ecdsa>`_.
+
+ * Note that you might still run into problems with key negotiation --
+ Paramiko picks the first key that the server offers, which might not be
+ what you have in your known_hosts file.
+ * Mega thanks to Ethan Glasser-Camp for the patch.
+
+- :feature:`136` Add server-side support for the SSH protocol's 'env' command.
+ Thanks to Benjamin Pollack for the patch.
+- :bug:`156 (1.11+)` Fix potential deadlock condition when using Channel
+ objects as sockets (e.g. when using SSH gatewaying). Thanks to Steven Noonan
+ and Frank Arnold for catch & patch.
+- :bug:`179` Fix a missing variable causing errors when an ssh_config file has
+ a non-default AddressFamily set. Thanks to Ed Marshall & Tomaz Muraus for
+ catch & patch.
+- :bug:`200` Fix an exception-causing typo in ``demo_simple.py``. Thanks to Alex
+ Buchanan for catch & Dave Foster for patch.
+- :bug:`199` Typo fix in the license header cross-project. Thanks to Armin
+ Ronacher for catch & patch.
+- :release:`1.11.1 <2013-09-20>`
+- :release:`1.10.3 <2013-09-20>`
+- :bug:`162` Clean up HMAC module import to avoid deadlocks in certain uses of
+ SSHClient. Thanks to Gernot Hillier for the catch & suggested fix.
+- :bug:`36` Fix the port-forwarding demo to avoid file descriptor errors.
+ Thanks to Jonathan Halcrow for catch & patch.
+- :bug:`168` Update config handling to properly handle multiple 'localforward'
+ and 'remoteforward' keys. Thanks to Emre Yılmaz for the patch.
+- :release:`1.11.0 <2013-07-26>`
+- :release:`1.10.2 <2013-07-26>`
+- :bug:`98 major` On Windows, when interacting with the PuTTY Pageant, Paramiko
+ now creates the shared memory map with explicit Security Attributes of the
+ user, which is the same technique employed by the canonical PuTTY library to
+ avoid permissions issues when Paramiko is running under a different UAC
+ context than the PuTTY Pageant process. Thanks to Jason R. Coombs for the
+ patch.
+- :support:`100` Remove use of PyWin32 in ``win_pageant`` module. Module was
+ already dependent on ctypes for constructing appropriate structures and had
+ ctypes implementations of all functionality. Thanks to Jason R. Coombs for
+ the patch.
+- :bug:`87 major` Ensure updates to ``known_hosts`` files account for any
+ updates to said files after Paramiko initially read them. (Includes related
+ fix to guard against duplicate entries during subsequent ``known_hosts``
+ loads.) Thanks to ``@sunweaver`` for the contribution.
+- :bug:`153` (also :issue:`67`) Warn on parse failure when reading known_hosts
+ file. Thanks to ``@glasserc`` for patch.
+- :bug:`146` Indentation fixes for readability. Thanks to Abhinav Upadhyay for
+ catch & patch.
+- :release:`1.10.1 <2013-04-05>`
+- :bug:`142` (`Fabric #811 <https://github.com/fabric/fabric/issues/811>`_)
+ SFTP put of empty file will still return the attributes of the put file.
+ Thanks to Jason R. Coombs for the patch.
+- :bug:`154` (`Fabric #876 <https://github.com/fabric/fabric/issues/876>`_)
+ Forwarded SSH agent connections left stale local pipes lying around, which
+ could cause local (and sometimes remote or network) resource starvation when
+ running many agent-using remote commands. Thanks to Kevin Tegtmeier for catch
+ & patch.
+- :release:`1.10.0 <2013-03-01>`
+- :feature:`66` Batch SFTP writes to help speed up file transfers. Thanks to
+ Olle Lundberg for the patch.
+- :bug:`133 major` Fix handling of window-change events to be on-spec and not
+ attempt to wait for a response from the remote sshd; this fixes problems with
+ less common targets such as some Cisco devices. Thanks to Phillip Heller for
+ catch & patch.
+- :feature:`93` Overhaul SSH config parsing to be in line with ``man
+ ssh_config`` (& the behavior of ``ssh`` itself), including addition of parameter
+ expansion within config values. Thanks to Olle Lundberg for the patch.
+- :feature:`110` Honor SSH config ``AddressFamily`` setting when looking up
+ local host's FQDN. Thanks to John Hensley for the patch.
+- :feature:`128` Defer FQDN resolution until needed, when parsing SSH config
+ files. Thanks to Parantapa Bhattacharya for catch & patch.
+- :bug:`102 major` Forego random padding for packets when running under
+ ``*-ctr`` ciphers. This corrects some slowdowns on platforms where random
+ byte generation is inefficient (e.g. Windows). Thanks to ``@warthog618`` for
+ catch & patch, and Michael van der Kolff for code/technique review.
+- :feature:`127` Turn ``SFTPFile`` into a context manager. Thanks to Michael
+ Williamson for the patch.
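+
+ A minimal sketch, given a connected ``SSHClient`` named ``client`` (remote
+ path is a placeholder)::
+
+     sftp = client.open_sftp()
+     # The remote file handle is closed automatically when the block exits.
+     with sftp.open("/tmp/example.txt") as f:
+         data = f.read()
+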
+- :feature:`116` Limit ``Message.get_bytes`` to an upper bound of 1MB to protect
+ against potential DoS vectors. Thanks to ``@mvschaik`` for catch & patch.
+- :feature:`115` Add convenience ``get_pty`` kwarg to ``Client.exec_command`` so
+ users not manually controlling a channel object can still toggle PTY
+ creation. Thanks to Michael van der Kolff for the patch.
+- :feature:`71` Add ``SFTPClient.putfo`` and ``.getfo`` methods to allow direct
+ uploading/downloading of file-like objects. Thanks to Eric Buehl for the
+ patch.
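+
+ For illustration, given a connected ``SSHClient`` named ``client`` (remote
+ path is a placeholder)::
+
+     from io import BytesIO
+
+     sftp = client.open_sftp()
+     # Upload straight from an in-memory buffer...
+     sftp.putfo(BytesIO(b"hello\n"), "/tmp/hello.txt")
+     # ...and download into another file-like object, no temp files needed.
+     buf = BytesIO()
+     sftp.getfo("/tmp/hello.txt", buf)
+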
+- :feature:`113` Add ``timeout`` parameter to ``SSHClient.exec_command`` for
+ easier setting of the command's internal channel object's timeout. Thanks to
+ Cernov Vladimir for the patch.
+- :support:`94` Remove duplication of SSH port constant. Thanks to Olle
+ Lundberg for the catch.
+- :feature:`80` Expose the internal "is closed" property of the file transfer
+ class ``BufferedFile`` as ``.closed``, better conforming to Python's file
+ interface. Thanks to ``@smunaut`` and James Hiscock for catch & patch.
diff --git a/sites/www/conf.py b/sites/www/conf.py
new file mode 100644
index 0000000..179f0b7
--- /dev/null
+++ b/sites/www/conf.py
@@ -0,0 +1,28 @@
+# Obtain shared config values
+from pathlib import Path
+import os
+import sys
+
+updir = Path(__file__).parent.parent.resolve()
+sys.path.append(str(updir))
+from shared_conf import *
+
+# Releases changelog extension
+extensions.append("releases")
+releases_release_uri = "https://github.com/paramiko/paramiko/tree/%s"
+releases_issue_uri = "https://github.com/paramiko/paramiko/issues/%s"
+releases_development_branch = "main"
+# Don't show unreleased_X.x sections up top for 1.x or 2.x anymore
+releases_supported_versions = [3]
+
+# Default is 'local' building, but reference the public docs site when building
+# under RTD.
+target = updir / "docs" / "_build"
+if os.environ.get("READTHEDOCS") == "True":
+ target = "http://docs.paramiko.org/en/latest/"
+intersphinx_mapping["docs"] = (str(target), None)
+
+# Sister-site links to API docs
+html_theme_options["extra_nav_links"] = {
+ "API Docs": "http://docs.paramiko.org"
+}
diff --git a/sites/www/contact.rst b/sites/www/contact.rst
new file mode 100644
index 0000000..202815f
--- /dev/null
+++ b/sites/www/contact.rst
@@ -0,0 +1,12 @@
+=======
+Contact
+=======
+
+You can get in touch with the developer & user community in any of the
+following ways:
+
+* Submit contributions on Github - see the :doc:`contributing` page.
+* Follow ``@bitprophet`` on Twitter, though it's not a dedicated account and
+ mostly just retweets funny pictures.
+* Subscribe to the ``paramiko`` category on the developer's blog:
+ http://bitprophet.org/categories/paramiko/
diff --git a/sites/www/contributing.rst b/sites/www/contributing.rst
new file mode 100644
index 0000000..9cf0f34
--- /dev/null
+++ b/sites/www/contributing.rst
@@ -0,0 +1,25 @@
+============
+Contributing
+============
+
+How to get the code
+===================
+
+Our primary Git repository is on Github at `paramiko/paramiko`_;
+please follow their instructions for cloning to your local system. (If you
+intend to submit patches/pull requests, we recommend forking first, then
+cloning your fork. Github has excellent documentation for all this.)
+
+
+How to submit bug reports or new code
+=====================================
+
+Please see `this project-agnostic contribution guide
+<http://contribution-guide.org>`_ - we follow it explicitly. Again, our code
+repository and bug tracker are `on Github`_.
+
+Our changelog is located in ``sites/www/changelog.rst``.
+
+
+.. _paramiko/paramiko:
+.. _on Github: https://github.com/paramiko/paramiko
diff --git a/sites/www/faq.rst b/sites/www/faq.rst
new file mode 100644
index 0000000..74b7501
--- /dev/null
+++ b/sites/www/faq.rst
@@ -0,0 +1,36 @@
+===================================
+Frequently Asked/Answered Questions
+===================================
+
+Which version should I use? I see multiple active releases.
+===========================================================
+
+Please see :ref:`the installation docs <release-lines>` which have an explicit
+section about this topic.
+
+Paramiko doesn't work with my Cisco, Windows or other non-Unix system!
+======================================================================
+
+In an ideal world, the developers would love to support every possible target
+system. Unfortunately, volunteer development time and access to non-mainstream
+platforms are limited, meaning that we can only fully support standard OpenSSH
+implementations such as those found on the average Linux distribution (as well
+as on Mac OS X and \*BSD.)
+
+Because of this, **we typically close bug reports for nonstandard SSH
+implementations or host systems**.
+
+However, **closed does not imply locked** - affected users can still post
+comments on such tickets - and **we will always consider actual patch
+submissions for these issues**, provided they can get +1s from similarly
+affected users and are proven to not break existing functionality.
+
+I'm having strange issues with my code hanging at shutdown!
+===========================================================
+
+Make sure you explicitly ``.close()`` your connection objects (usually
+``SSHClient``) if you're having any sort of hang/freeze at shutdown time!
+
+Doing so isn't strictly necessary 100% of the time, but it is almost always the
+right solution if you run into the various corner cases that cause race
+conditions, etc.
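+
+A minimal sketch of the pattern (the host name and credentials are
+placeholders)::
+
+    import paramiko
+
+    client = paramiko.SSHClient()
+    client.load_system_host_keys()
+    client.connect("host.example.com", username="user")
+    try:
+        stdin, stdout, stderr = client.exec_command("uptime")
+        print(stdout.read())
+    finally:
+        # Closing explicitly avoids hangs/races at interpreter shutdown.
+        client.close()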
diff --git a/sites/www/index.rst b/sites/www/index.rst
new file mode 100644
index 0000000..9c3bb3a
--- /dev/null
+++ b/sites/www/index.rst
@@ -0,0 +1,13 @@
+.. include:: ../../README.rst
+
+.. toctree::
+ :hidden:
+
+ changelog
+ FAQs <faq>
+ installing
+ installing-1.x
+ contributing
+ contact
+
+
diff --git a/sites/www/installing-1.x.rst b/sites/www/installing-1.x.rst
new file mode 100644
index 0000000..7421a6c
--- /dev/null
+++ b/sites/www/installing-1.x.rst
@@ -0,0 +1,121 @@
+================
+Installing (1.x)
+================
+
+.. note:: Installing Paramiko 2.0 or above? See :doc:`installing` instead.
+
+This document includes legacy notes on installing Paramiko 1.x (specifically,
+1.13 and up). Users are strongly encouraged to upgrade to 2.0 when possible;
+PyCrypto (the dependency covered below) is no longer maintained and contains
+security vulnerabilities.
+
+General install notes
+=====================
+
+* Python 2.6+ and 3.3+ are supported; Python <=2.5 and 3.0-3.2 are **not
+ supported**.
+* See the note in the main install doc about :ref:`release-lines` for details
+ on specific versions you may want to install.
+
+ .. note:: 1.x will eventually be entirely end-of-lifed.
+* Paramiko 1.7-1.14 have only one dependency: :ref:`pycrypto`.
+* Paramiko 1.15+ (not including 2.x and above) add a second, pure-Python
+ dependency: the ``ecdsa`` module, trivially installable via PyPI.
+* Paramiko 1.15+ (again, not including 2.x and up) also allows you to
+ optionally install a few more dependencies to gain support for
+ :ref:`GSS-API/Kerberos <gssapi-on-1x>`.
+* Users on Windows may want to opt for the :ref:`pypm` approach.
+
+
+.. _pycrypto:
+
+PyCrypto
+========
+
+`PyCrypto <https://www.dlitz.net/software/pycrypto/>`__ provides the low-level
+(C-based) encryption algorithms we need to implement the SSH protocol. There
+are a couple gotchas associated with installing PyCrypto: its compatibility
+with Python's package tools, and the fact that it is a C-based extension.
+
+C extension
+-----------
+
+Unless you are installing from a precompiled source such as a Debian apt
+repository or RedHat RPM, or using :ref:`pypm <pypm>`, you will also need the
+ability to build Python C-based modules from source in order to install
+PyCrypto. Users on **Unix-based platforms** such as Ubuntu or Mac OS X will
+need the traditional C build toolchain installed (e.g. Developer Tools / XCode
+Tools on the Mac, or the ``build-essential`` package on Ubuntu or Debian Linux
+-- basically, anything with ``gcc``, ``make`` and so forth) as well as the
+Python development libraries, often named ``python-dev`` or similar.
+
+Slow vs fast crypto math
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+PyCrypto attempts to use the ``gmp`` C math library if it is present on your
+system, which enables what it internally calls "fastmath" (``_fastmath.so``).
+When those headers are not available, it falls back to "slowmath"
+(``_slowmath.py``) which is a pure-Python implementation.
+
+Real-world tests have shown significant benefits to using the C version of this
+code; thus we strongly recommend you install the ``gmp`` development headers
+**before** installing Paramiko/PyCrypto. E.g.::
+
+ $ apt-get install libgmp-dev # or just apt
+ $ yum install gmp-devel # or dnf
+ $ brew install gmp
+
+If you're unsure which version of math you've ended up with, a quick way to
+check is to examine whether ``_fastmath.so`` or ``_slowmath.py`` appears in the
+output of::
+
+ from Crypto.PublicKey import RSA
+ print(RSA._impl._math)
+
+Windows
+~~~~~~~
+
+For **Windows** users, we recommend using :ref:`pypm`, installing a C
+development environment such as `Cygwin <http://cygwin.com>`_, or obtaining a
+precompiled Win32 PyCrypto package from `voidspace's Python modules page
+<http://www.voidspace.org.uk/python/modules.shtml#pycrypto>`_.
+
+.. note::
+ Some Windows users whose Python is 64-bit have found that the PyCrypto
+ dependency ``winrandom`` may not install properly, leading to ImportErrors.
+ In this scenario, you'll probably need to compile ``winrandom`` yourself
+ via e.g. MS Visual Studio. See `Fabric #194
+ <https://github.com/fabric/fabric/issues/194>`_ for info.
+
+
+.. _pypm:
+
+ActivePython and PyPM
+=====================
+
+Windows users who already have ActiveState's `ActivePython
+<http://www.activestate.com/activepython/downloads>`_ distribution installed
+may find Paramiko is best installed with `its package manager, PyPM
+<http://code.activestate.com/pypm/>`_. Below is example output from an
+installation of Paramiko via ``pypm``::
+
+ C:\> pypm install paramiko
+ The following packages will be installed into "%APPDATA%\Python" (2.7):
+ paramiko-1.7.8 pycrypto-2.4
+ Get: [pypm-free.activestate.com] paramiko 1.7.8
+ Get: [pypm-free.activestate.com] pycrypto 2.4
+ Installing paramiko-1.7.8
+ Installing pycrypto-2.4
+ C:\>
+
+
+.. _gssapi-on-1x:
+
+Optional dependencies for GSS-API / SSPI / Kerberos
+===================================================
+
+First, see the main install doc's notes: :ref:`gssapi` - everything there is
+required for Paramiko 1.x as well.
+
+Additionally, users of Paramiko 1.x, on all platforms, need a final dependency:
+`pyasn1 <https://pypi.org/project/pyasn1/>`_ ``0.1.7`` or better.
diff --git a/sites/www/installing.rst b/sites/www/installing.rst
new file mode 100644
index 0000000..98c0655
--- /dev/null
+++ b/sites/www/installing.rst
@@ -0,0 +1,152 @@
+==========
+Installing
+==========
+
+
+.. note::
+ These instructions cover Paramiko 2.0 and above. If you're looking to
+ install Paramiko 1.x, see :doc:`installing-1.x`. However, **the 1.x line
+ relies on insecure dependencies** so upgrading is strongly encouraged.
+
+
+.. _paramiko-itself:
+
+Paramiko itself
+===============
+
+The recommended way to get Paramiko is to **install the latest stable release**
+via `pip <http://pip-installer.org>`_::
+
+ $ pip install paramiko
+
+We currently support **Python 3.6+ only**. Users on older interpreter versions
+are urged to upgrade.
+
+Paramiko has only a few **direct dependencies**:
+
+- The big one, with its own sub-dependencies, is Cryptography; see :ref:`its
+ specific note below <cryptography>` for more details;
+- `bcrypt <https://pypi.org/project/bcrypt/>`_ and `pynacl
+ <https://pypi.org/project/PyNaCl/>`_ for Ed25519 key support.
+
+There are also a handful of **optional dependencies** you may install using
+`setuptools 'extras'
+<https://packaging.python.org/tutorials/installing-packages/#installing-setuptools-extras>`_:
+
+- If you want all optional dependencies at once, use ``paramiko[all]``.
+- For ``Match exec`` config support and/or ``ProxyCommand`` feature support,
+ use ``paramiko[invoke]`` (which installs `Invoke
+ <https://www.pyinvoke.org>`_).
+- For GSS-API / SSPI support, use ``paramiko[gssapi]``, though also see
+ :ref:`the below subsection on it <gssapi>` for details.
+
+
+.. _release-lines:
+
+Release lines
+-------------
+
+Users desiring stability may wish to pin themselves to a specific release line
+once they first start using Paramiko; to assist in this, we guarantee bugfixes
+for the last 2-3 releases including the latest stable one.
+
+This typically spans major & minor versions, so even if e.g. 3.1 is the latest
+stable release, it's likely that bugfixes will occasionally come out for the
+latest 2.x and perhaps even 1.x releases, as well as for 3.0. New feature
+releases for previous major-version lines are less likely but not unheard of.
+
+If you're unsure which version to install:
+
+* **Completely new users** should always default to the **latest stable
+ release** (as above, whatever is newest / whatever shows up with ``pip
+ install paramiko``.)
+* **Users upgrading from a much older version** (e.g. 1.7.x through 1.10.x)
+ should probably get the **oldest actively supported line** (check the
+ :doc:`changelog` for recent releases).
+* **Everybody else** is hopefully already "on" a given version and can
+ carefully upgrade to whichever version they care to, when their release line
+ stops being supported.
+
+
+.. _cryptography:
+
+Cryptography
+============
+
+`Cryptography <https://cryptography.io>`__ provides the low-level (C-based)
+encryption algorithms we need to implement the SSH protocol. It has detailed
+`installation instructions`_ (and an `FAQ
+<https://cryptography.io/en/latest/faq/>`_) which you should read carefully.
+
+In general, you'll need one of the following setups:
+
+* On Windows or Mac OS X, provided your ``pip`` is modern (8.x+): nothing else
+ is required. ``pip`` will install statically compiled binary archives of
+ Cryptography & its dependencies.
+* On Linux, or on other platforms with older versions of ``pip``: you'll need a
+ C build toolchain, plus development headers for Python, OpenSSL and
+ ``libffi``. Again, see `Cryptography's install docs`_; these requirements may
+ occasionally change.
+
+ .. warning::
+ If you go this route, note that **OpenSSL 1.0.1 or newer is effectively
+ required**. Cryptography 1.3 and older technically allow OpenSSL 0.9.8, but
+ 1.4 and newer - which Paramiko will gladly install or upgrade, if you e.g.
+ ``pip install -U`` - drop that support.
+
+* Similarly, Cryptography 3.4 and above require Rust language tooling to
+ install from source; once again see Cryptography's documentation for details
+ here, such as `their Rust install section`_ and `this FAQ entry`_.
+
+.. _installation instructions:
+.. _Cryptography's install docs: https://cryptography.io/en/latest/installation.html
+.. _their Rust install section: https://cryptography.io/en/latest/installation.html#rust
+.. _this FAQ entry: https://cryptography.io/en/latest/faq.html#installing-cryptography-fails-with-error-can-not-find-rust-compiler
+
+
+.. _gssapi:
+
+Optional dependencies for GSS-API / SSPI / Kerberos
+===================================================
+
+In order to use GSS-API/Kerberos & related functionality, additional
+dependencies are required. It hopefully goes without saying but **all
+platforms** need **a working installation of GSS-API itself**, e.g. Heimdal.
+
+.. note::
+ If you use Microsoft SSPI for kerberos authentication and credential
+ delegation, make sure that the target host is trusted for delegation in the
+ active directory configuration. For details see:
+ http://technet.microsoft.com/en-us/library/cc738491%28v=ws.10%29.aspx
+
+The ``gssapi`` "extra" install flavor
+-------------------------------------
+
+If you're installing via ``pip`` (recommended), you should be able to get the
+optional Python package requirements by changing your installation to refer to
+``paramiko[gssapi]`` (from simply ``paramiko``), e.g.::
+
+ pip install "paramiko[gssapi]"
+
+(Or update your ``requirements.txt``, etc.)
+
+
+.. TODO: just axe this once legacy gssapi support is gone, no point reiterating
+
+Manual dependency installation
+------------------------------
+
+If you're not using ``pip`` or your ``pip`` is too old to support the "extras"
+functionality, the optional dependencies are as follows:
+
+* All platforms need `pyasn1 <https://pypi.org/project/pyasn1/>`_ ``0.1.7`` or
+ later.
+* **Unix** needs: `gssapi <https://pypi.org/project/gssapi/>`__ ``1.4.1`` or better.
+
+ * An alternative is the `python-gssapi
+ <https://pypi.org/project/python-gssapi/>`_ library (``0.6.1`` or above),
+ though it is no longer maintained upstream, and Paramiko's support for
+ its API may eventually become deprecated.
+
+* **Windows** needs `pywin32 <https://pypi.python.org/pypi/pywin32>`_ ``2.1.8``
+ or better.
diff --git a/tasks.py b/tasks.py
new file mode 100644
index 0000000..361d9cd
--- /dev/null
+++ b/tasks.py
@@ -0,0 +1,163 @@
+import os
+from pathlib import Path
+from os.path import join
+from shutil import rmtree, copytree
+
+from invoke import Collection, task
+from invocations import checks
+from invocations.docs import docs, www, sites, watch_docs
+from invocations.packaging.release import ns as release_coll, publish
+from invocations.testing import count_errors
+
+
+# TODO: this screams out for the invoke missing-feature of "I just wrap task X,
+# assume its signature by default" (even if that is just **kwargs support)
+@task
+def test(
+ ctx,
+ verbose=True,
+ color=True,
+ capture="sys",
+ module=None,
+ k=None,
+ x=False,
+ opts="",
+ coverage=False,
+ include_slow=False,
+ loop_on_fail=False,
+):
+ """
+ Run unit tests via pytest.
+
+ By default, known-slow parts of the suite are SKIPPED unless
+ ``--include-slow`` is given. (Note that ``--include-slow`` does not mesh
+ well with explicit ``--opts="-m=xxx"`` - if ``-m`` is found in ``--opts``,
+ ``--include-slow`` will be ignored!)
+ """
+ if verbose and "--verbose" not in opts and "-v" not in opts:
+ opts += " --verbose"
+ # TODO: forget why invocations.pytest added this; is it to force color when
+ # running headless? Probably?
+ if color:
+ opts += " --color=yes"
+ opts += " --capture={}".format(capture)
+ if "-m" not in opts and not include_slow:
+ opts += " -m 'not slow'"
+ if k is not None and not ("-k" in opts if opts else False):
+ opts += " -k {}".format(k)
+ if x and not ("-x" in opts if opts else False):
+ opts += " -x"
+ if loop_on_fail and not ("-f" in opts if opts else False):
+ opts += " -f"
+ modstr = ""
+ if module is not None:
+ base = f"{module}.py"
+ tests = Path("tests")
+ legacy = tests / f"test_{base}"
+ modstr = str(legacy if legacy.exists() else tests / base)
+ # Switch runner depending on coverage or no coverage.
+ # TODO: get pytest's coverage plugin working, IIRC it has issues?
+ runner = "pytest"
+ if coverage:
+ # Leverage how pytest can be run as 'python -m pytest', and then how
+ # coverage can be told to run things in that manner instead of
+ # expecting a literal .py file.
+ runner = "coverage run -m pytest"
+ # Strip SSH_AUTH_SOCK from parent env to avoid pollution by interactive
+ # users.
+ # TODO: once pytest coverage plugin works, see if there's a pytest-native
+ # way to handle the env stuff too, then we can remove these tasks entirely
+ # in favor of just "run pytest"?
+ env = dict(os.environ)
+ if "SSH_AUTH_SOCK" in env:
+ del env["SSH_AUTH_SOCK"]
+ cmd = "{} {} {}".format(runner, opts, modstr)
+ # NOTE: we have a pytest.ini and tend to use that over PYTEST_ADDOPTS.
+ ctx.run(cmd, pty=True, env=env, replace_env=True)
+
+
+@task
+def coverage(ctx, opts=""):
+ """
+ Execute all tests (normal and slow) with coverage enabled.
+ """
+ test(ctx, coverage=True, include_slow=True, opts=opts)
+ # NOTE: codecov now handled purely in invocations/orb
+
+
+@task
+def guard(ctx, opts=""):
+ """
+ Execute all tests and then watch for changes, re-running.
+ """
+ # TODO if coverage was run via pytest-cov, we could add coverage here too
+ return test(ctx, include_slow=True, loop_on_fail=True, opts=opts)
+
+
+# Until we stop bundling docs w/ releases. Need to discover use cases first.
+# TODO: would be nice to tie this into our own version of build() too, but
+# still have publish() use that build()...really need to try out classes!
+# TODO 4.0: I'd like to just axe the 'built docs in sdist', none of my other
+# projects do it.
+@task
+def publish_(
+ ctx, sdist=True, wheel=True, sign=False, dry_run=False, index=None
+):
+ """
+ Wraps invocations.packaging.publish to add baked-in docs folder.
+ """
+ # Build docs first. Use terribad workaround pending invoke #146
+ ctx.run("inv docs", pty=True, hide=False)
+ # Move the built docs into where Epydocs used to live
+ target = "docs"
+ rmtree(target, ignore_errors=True)
+ # TODO: make it easier to yank out this config val from the docs coll
+ copytree("sites/docs/_build", target)
+ # Publish
+ publish(
+ ctx, sdist=sdist, wheel=wheel, sign=sign, dry_run=dry_run, index=index
+ )
+
+
+# Also have to hack up the newly enhanced all_() so it uses our publish
+@task(name="all", default=True)
+def all_(c, dry_run=False):
+ release_coll["prepare"](c, dry_run=dry_run)
+ publish_(c, dry_run=dry_run)
+ release_coll["push"](c, dry_run=dry_run)
+
+
+# TODO: "replace one task with another" needs a better public API, this is
+# using unpublished internals & skips all the stuff add_task() does re:
+# aliasing, defaults etc.
+release_coll.tasks["publish"] = publish_
+release_coll.tasks["all"] = all_
+
+ns = Collection(
+ test,
+ coverage,
+ guard,
+ release_coll,
+ docs,
+ www,
+ watch_docs,
+ sites,
+ count_errors,
+ checks.blacken,
+ checks,
+)
+ns.configure(
+ {
+ "packaging": {
+ # NOTE: many of these are also set in kwarg defaults above; but
+ # having them here too means once we get rid of our custom
+ # release(), the behavior stays.
+ "sign": False,
+ "wheel": True,
+ "changelog_file": join(
+ www.configuration()["sphinx"]["source"], "changelog.rst"
+ ),
+ },
+ "docs": {"browse": "remote"},
+ }
+)
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..f43975d
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,56 @@
+"""Base classes and helpers for testing paramiko."""
+
+import functools
+import locale
+import os
+
+from pytest import skip
+
+
+# List of locales which have non-ascii characters in all categories.
+# Omits most European languages which for instance may have only some months
+# with names that include accented characters.
+_non_ascii_locales = [
+ # East Asian locales
+ "ja_JP",
+ "ko_KR",
+ "zh_CN",
+ "zh_TW",
+ # European locales with non-latin alphabets
+ "el_GR",
+ "ru_RU",
+ "uk_UA",
+]
+# Also include UTF-8 versions of these locales
+_non_ascii_locales.extend([name + ".utf8" for name in _non_ascii_locales])
+
+
+def requireNonAsciiLocale(category_name="LC_ALL"):
+ """Run decorated test under a non-ascii locale or skip if not possible."""
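+    # Typical (hypothetical) use on a test method:
+    #
+    #   @requireNonAsciiLocale("LC_TIME")
+    #   def test_locale_sensitive_thing(self):
+    #       ...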
+ if os.name != "posix":
+ return skip("Non-posix OSes don't really use C locales")
+ cat = getattr(locale, category_name)
+ return functools.partial(_decorate_with_locale, cat, _non_ascii_locales)
+
+
+def _decorate_with_locale(category, try_locales, test_method):
+ """Decorate test_method to run after switching to a different locale."""
+
+ def _test_under_locale(testself, *args, **kwargs):
+ original = locale.setlocale(category)
+ while try_locales:
+ try:
+ locale.setlocale(category, try_locales[0])
+ except locale.Error:
+ # Mutating original list is ok, setlocale would keep failing
+ try_locales.pop(0)
+ else:
+ try:
+ return test_method(testself, *args, **kwargs)
+ finally:
+ locale.setlocale(category, original)
+ # No locales could be used? Just skip the decorated test :(
+ skip("No usable locales installed")
+
+ functools.update_wrapper(_test_under_locale, test_method)
+ return _test_under_locale
diff --git a/tests/_loop.py b/tests/_loop.py
new file mode 100644
index 0000000..a374001
--- /dev/null
+++ b/tests/_loop.py
@@ -0,0 +1,98 @@
+# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import socket
+import threading
+
+from paramiko.util import asbytes
+
+
+class LoopSocket:
+ """
+ A LoopSocket looks like a normal socket, but all data written to it is
+ delivered on the read-end of another LoopSocket, and vice versa. It's
+ like a software "socketpair".
+ """
+
+ def __init__(self):
+ self.__in_buffer = bytes()
+ self.__lock = threading.Lock()
+ self.__cv = threading.Condition(self.__lock)
+ self.__timeout = None
+ self.__mate = None
+ self._closed = False
+
+ def close(self):
+ self.__unlink()
+ self._closed = True
+ try:
+ self.__lock.acquire()
+ self.__in_buffer = bytes()
+ finally:
+ self.__lock.release()
+
+ def send(self, data):
+ data = asbytes(data)
+ if self.__mate is None:
+ # EOF
+ raise EOFError()
+ self.__mate.__feed(data)
+ return len(data)
+
+ def recv(self, n):
+ self.__lock.acquire()
+ try:
+ if self.__mate is None:
+ # EOF
+ return bytes()
+ if len(self.__in_buffer) == 0:
+ self.__cv.wait(self.__timeout)
+ if len(self.__in_buffer) == 0:
+ raise socket.timeout
+ out = self.__in_buffer[:n]
+ self.__in_buffer = self.__in_buffer[n:]
+ return out
+ finally:
+ self.__lock.release()
+
+ def settimeout(self, n):
+ self.__timeout = n
+
+ def link(self, other):
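+        # Pair this socket with ``other``: bytes sent on either side become
+        # readable via recv() on the other, like an in-memory socketpair().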
+ self.__mate = other
+ self.__mate.__mate = self
+
+ def __feed(self, data):
+ self.__lock.acquire()
+ try:
+ self.__in_buffer += data
+ self.__cv.notify_all()
+ finally:
+ self.__lock.release()
+
+ def __unlink(self):
+ m = None
+ self.__lock.acquire()
+ try:
+ if self.__mate is not None:
+ m = self.__mate
+ self.__mate = None
+ finally:
+ self.__lock.release()
+ if m is not None:
+ m.__unlink()
diff --git a/tests/_stub_sftp.py b/tests/_stub_sftp.py
new file mode 100644
index 0000000..0c0372e
--- /dev/null
+++ b/tests/_stub_sftp.py
@@ -0,0 +1,232 @@
+# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+A stub SFTP server for loopback SFTP testing.
+"""
+
+import os
+
+from paramiko import (
+ AUTH_SUCCESSFUL,
+ OPEN_SUCCEEDED,
+ SFTPAttributes,
+ SFTPHandle,
+ SFTPServer,
+ SFTPServerInterface,
+ SFTP_FAILURE,
+ SFTP_OK,
+ ServerInterface,
+)
+from paramiko.common import o666
+
+
+class StubServer(ServerInterface):
+ def check_auth_password(self, username, password):
+ # all are allowed
+ return AUTH_SUCCESSFUL
+
+ def check_channel_request(self, kind, chanid):
+ return OPEN_SUCCEEDED
+
+
+class StubSFTPHandle(SFTPHandle):
+ def stat(self):
+ try:
+ return SFTPAttributes.from_stat(os.fstat(self.readfile.fileno()))
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+
+ def chattr(self, attr):
+ # python doesn't have equivalents to fchown or fchmod, so we have to
+ # use the stored filename
+ try:
+ SFTPServer.set_file_attr(self.filename, attr)
+ return SFTP_OK
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+
+
+class StubSFTPServer(SFTPServerInterface):
+ # assume current folder is a fine root
+ # (the tests always create and eventually delete a subfolder, so there
+ # shouldn't be any mess)
+ ROOT = os.getcwd()
+
+ def _realpath(self, path):
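+        # Map the client-visible SFTP path onto the real filesystem by
+        # prefixing the canonicalized (absolute) path with ROOT.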
+ return self.ROOT + self.canonicalize(path)
+
+ def list_folder(self, path):
+ path = self._realpath(path)
+ try:
+ out = []
+ flist = os.listdir(path)
+ for fname in flist:
+ attr = SFTPAttributes.from_stat(
+ os.stat(os.path.join(path, fname))
+ )
+ attr.filename = fname
+ out.append(attr)
+ return out
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+
+ def stat(self, path):
+ path = self._realpath(path)
+ try:
+ return SFTPAttributes.from_stat(os.stat(path))
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+
+ def lstat(self, path):
+ path = self._realpath(path)
+ try:
+ return SFTPAttributes.from_stat(os.lstat(path))
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+
+ def open(self, path, flags, attr):
+ path = self._realpath(path)
+ try:
+ binary_flag = getattr(os, "O_BINARY", 0)
+ flags |= binary_flag
+ mode = getattr(attr, "st_mode", None)
+ if mode is not None:
+ fd = os.open(path, flags, mode)
+ else:
+ # os.open() defaults to 0777 which is
+ # an odd default mode for files
+ fd = os.open(path, flags, o666)
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+ if (flags & os.O_CREAT) and (attr is not None):
+ attr._flags &= ~attr.FLAG_PERMISSIONS
+ SFTPServer.set_file_attr(path, attr)
+ if flags & os.O_WRONLY:
+ if flags & os.O_APPEND:
+ fstr = "ab"
+ else:
+ fstr = "wb"
+ elif flags & os.O_RDWR:
+ if flags & os.O_APPEND:
+ fstr = "a+b"
+ else:
+ fstr = "r+b"
+ else:
+ # O_RDONLY (== 0)
+ fstr = "rb"
+ try:
+ f = os.fdopen(fd, fstr)
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+ fobj = StubSFTPHandle(flags)
+ fobj.filename = path
+ fobj.readfile = f
+ fobj.writefile = f
+ return fobj
+
+ def remove(self, path):
+ path = self._realpath(path)
+ try:
+ os.remove(path)
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+ return SFTP_OK
+
+ def rename(self, oldpath, newpath):
+ oldpath = self._realpath(oldpath)
+ newpath = self._realpath(newpath)
+ if os.path.exists(newpath):
+ return SFTP_FAILURE
+ try:
+ os.rename(oldpath, newpath)
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+ return SFTP_OK
+
+ def posix_rename(self, oldpath, newpath):
+ oldpath = self._realpath(oldpath)
+ newpath = self._realpath(newpath)
+ try:
+ os.rename(oldpath, newpath)
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+ return SFTP_OK
+
+ def mkdir(self, path, attr):
+ path = self._realpath(path)
+ try:
+ os.mkdir(path)
+ if attr is not None:
+ SFTPServer.set_file_attr(path, attr)
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+ return SFTP_OK
+
+ def rmdir(self, path):
+ path = self._realpath(path)
+ try:
+ os.rmdir(path)
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+ return SFTP_OK
+
+ def chattr(self, path, attr):
+ path = self._realpath(path)
+ try:
+ SFTPServer.set_file_attr(path, attr)
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+ return SFTP_OK
+
+ def symlink(self, target_path, path):
+ path = self._realpath(path)
+ if (len(target_path) > 0) and (target_path[0] == "/"):
+ # absolute symlink
+ target_path = os.path.join(self.ROOT, target_path[1:])
+ if target_path[:2] == "//":
+ # bug in os.path.join
+ target_path = target_path[1:]
+ else:
+ # compute relative to path
+ abspath = os.path.join(os.path.dirname(path), target_path)
+ if abspath[: len(self.ROOT)] != self.ROOT:
+ # this symlink isn't going to work anyway -- just break it
+ # immediately
+ target_path = "<error>"
+ try:
+ os.symlink(target_path, path)
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+ return SFTP_OK
+
+ def readlink(self, path):
+ path = self._realpath(path)
+ try:
+ symlink = os.readlink(path)
+ except OSError as e:
+ return SFTPServer.convert_errno(e.errno)
+ # if it's absolute, remove the root
+ if os.path.isabs(symlink):
+ if symlink[: len(self.ROOT)] == self.ROOT:
+ symlink = symlink[len(self.ROOT) :]
+ if (len(symlink) == 0) or (symlink[0] != "/"):
+ symlink = "/" + symlink
+ else:
+ symlink = "<error>"
+ return symlink
diff --git a/tests/_support/dss.key b/tests/_support/dss.key
new file mode 100644
index 0000000..e10807f
--- /dev/null
+++ b/tests/_support/dss.key
@@ -0,0 +1,12 @@
+-----BEGIN DSA PRIVATE KEY-----
+MIIBuwIBAAKBgQDngaYDZ30c6/7cJgEEbtl8FgKdwhba1Z7oOrOn4MI/6C42G1bY
+wMuqZf4dBCglsdq39SHrcjbE8Vq54gPSOh3g4+uV9Rcg5IOoPLbwp2jQfF6f1FIb
+sx7hrDCIqUcQccPSxetPBKmXI9RN8rZLaFuQeTnI65BKM98Ruwvq6SI2LwIVAPDP
+hSeawaJI27mKqOfe5PPBSmyHAoGBAJMXxXmPD9sGaQ419DIpmZecJKBUAy9uXD8x
+gbgeDpwfDaFJP8owByCKREocPFfi86LjCuQkyUKOfjYMN6iHIf1oEZjB8uJAatUr
+FzI0ArXtUqOhwTLwTyFuUojE5own2WYsOAGByvgfyWjsGhvckYNhI4ODpNdPlxQ8
+ZamaPGPsAoGARmR7CCPjodxASvRbIyzaVpZoJ/Z6x7dAumV+ysrV1BVYd0lYukmn
+jO1kKBWApqpH1ve9XDQYN8zgxM4b16L21kpoWQnZtXrY3GZ4/it9kUgyB7+NwacI
+BlXa8cMDL7Q/69o0d54U0X/NeX5QxuYR6OMJlrkQB7oiW/P/1mwjQgECFGI9QPSc
+h9pT9XHqn+1rZ4bK+QGA
+-----END DSA PRIVATE KEY-----
diff --git a/tests/_support/dss.key-cert.pub b/tests/_support/dss.key-cert.pub
new file mode 100644
index 0000000..07fd557
--- /dev/null
+++ b/tests/_support/dss.key-cert.pub
@@ -0,0 +1 @@
+ssh-dss-cert-v01@openssh.com AAAAHHNzaC1kc3MtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgJA3GjLmg6JbIWxokW/c827lmPOSvSfPDIY586yICFqIAAACBAOeBpgNnfRzr/twmAQRu2XwWAp3CFtrVnug6s6fgwj/oLjYbVtjAy6pl/h0EKCWx2rf1IetyNsTxWrniA9I6HeDj65X1FyDkg6g8tvCnaNB8Xp/UUhuzHuGsMIipRxBxw9LF608EqZcj1E3ytktoW5B5OcjrkEoz3xG7C+rpIjYvAAAAFQDwz4UnmsGiSNu5iqjn3uTzwUpshwAAAIEAkxfFeY8P2wZpDjX0MimZl5wkoFQDL25cPzGBuB4OnB8NoUk/yjAHIIpEShw8V+LzouMK5CTJQo5+Ngw3qIch/WgRmMHy4kBq1SsXMjQCte1So6HBMvBPIW5SiMTmjCfZZiw4AYHK+B/JaOwaG9yRg2Ejg4Ok10+XFDxlqZo8Y+wAAACARmR7CCPjodxASvRbIyzaVpZoJ/Z6x7dAumV+ysrV1BVYd0lYukmnjO1kKBWApqpH1ve9XDQYN8zgxM4b16L21kpoWQnZtXrY3GZ4/it9kUgyB7+NwacIBlXa8cMDL7Q/69o0d54U0X/NeX5QxuYR6OMJlrkQB7oiW/P/1mwjQgEAAAAAAAAAAAAAAAEAAAAJdXNlcl90ZXN0AAAACAAAAAR0ZXN0AAAAAAAAAAD//////////wAAAAAAAACCAAAAFXBlcm1pdC1YMTEtZm9yd2FyZGluZwAAAAAAAAAXcGVybWl0LWFnZW50LWZvcndhcmRpbmcAAAAAAAAAFnBlcm1pdC1wb3J0LWZvcndhcmRpbmcAAAAAAAAACnBlcm1pdC1wdHkAAAAAAAAADnBlcm1pdC11c2VyLXJjAAAAAAAAAAAAAAEXAAAAB3NzaC1yc2EAAAADAQABAAABAQDskr46Umjxh3wo7PoPQsSVS3xt6+5PhwmXrnVtBBnkOo+zHRwQo8G8sY+Lc6oOOzA5GCSawKOwqE305GIDfB8/L9EKOkAjdN18imDjw/YuJFA4bl9yFhsXrCb1GZPJw0pJ0H0Eid9EldyMQAhGE49MWvnFMQl1TgO6YWq/g71xAFimge0LvVWijlbMy7O+nsGxSpinIprV5S9Viv8XC/ku89tadZfca1uxq751aGfAWGeYrVytpUl8UO0ggqH6BaUvkDU7rWh2n5RHUTvgzceKWnz5wqd8BngK37WmJjAgCtHCJS5ZRf6oJGj2QVcqc6cjvEFWsCuOKB4KAjktauWxAAABDwAAAAdzc2gtcnNhAAABAK6jweL231fRhFoybEGTOXJfj0lx55KpDsw9Q1rBvZhrSgwUr2dFr9HVcKe44mTC7CMtdW5VcyB67l1fnMil/D/e4zYxI0PvbW6RxLFNqvvtxBu5sGt5B7uzV4aAV31TpWR0l5RwwpZqc0NUlTx7oMutN1BDrPqW70QZ/iTEwalkn5fo1JWej0cf4BdC9VgYDLnprx0KN3IToukbszRQySnuR6MQUfj0m7lUloJfF3rq8G0kNxWqDGoJilMhO5Lqu9wAhlZWdouypI6bViO6+ToCVixLNUYs3EfS1zCxvXpiyMvh6rZofJ6WqzUuSd4Mzb2Ka4ocTKi7kynF+OG0Ivo= tests/test_dss.key.pub
diff --git a/tests/_support/ecdsa-256.key b/tests/_support/ecdsa-256.key
new file mode 100644
index 0000000..42d4473
--- /dev/null
+++ b/tests/_support/ecdsa-256.key
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIKB6ty3yVyKEnfF/zprx0qwC76MsMlHY4HXCnqho2eKioAoGCCqGSM49
+AwEHoUQDQgAElI9mbdlaS+T9nHxY/59lFnn80EEecZDBHq4gLpccY8Mge5ZTMiMD
+ADRvOqQ5R98Sxst765CAqXmRtz8vwoD96g==
+-----END EC PRIVATE KEY-----
diff --git a/tests/_support/ecdsa-256.key-cert.pub b/tests/_support/ecdsa-256.key-cert.pub
new file mode 100644
index 0000000..f2c93cc
--- /dev/null
+++ b/tests/_support/ecdsa-256.key-cert.pub
@@ -0,0 +1 @@
+ecdsa-sha2-nistp256-cert-v01@openssh.com AAAAKGVjZHNhLXNoYTItbmlzdHAyNTYtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgJ+ZkRXedIWPl9y6fvel60p47ys5WgwMSjiwzJ2Ho+4MAAAAIbmlzdHAyNTYAAABBBJSPZm3ZWkvk/Zx8WP+fZRZ5/NBBHnGQwR6uIC6XHGPDIHuWUzIjAwA0bzqkOUffEsbLe+uQgKl5kbc/L8KA/eoAAAAAAAAAAAAAAAEAAAAJdXNlcl90ZXN0AAAACAAAAAR0ZXN0AAAAAAAAAAD//////////wAAAAAAAACCAAAAFXBlcm1pdC1YMTEtZm9yd2FyZGluZwAAAAAAAAAXcGVybWl0LWFnZW50LWZvcndhcmRpbmcAAAAAAAAAFnBlcm1pdC1wb3J0LWZvcndhcmRpbmcAAAAAAAAACnBlcm1pdC1wdHkAAAAAAAAADnBlcm1pdC11c2VyLXJjAAAAAAAAAAAAAAEXAAAAB3NzaC1yc2EAAAADAQABAAABAQDskr46Umjxh3wo7PoPQsSVS3xt6+5PhwmXrnVtBBnkOo+zHRwQo8G8sY+Lc6oOOzA5GCSawKOwqE305GIDfB8/L9EKOkAjdN18imDjw/YuJFA4bl9yFhsXrCb1GZPJw0pJ0H0Eid9EldyMQAhGE49MWvnFMQl1TgO6YWq/g71xAFimge0LvVWijlbMy7O+nsGxSpinIprV5S9Viv8XC/ku89tadZfca1uxq751aGfAWGeYrVytpUl8UO0ggqH6BaUvkDU7rWh2n5RHUTvgzceKWnz5wqd8BngK37WmJjAgCtHCJS5ZRf6oJGj2QVcqc6cjvEFWsCuOKB4KAjktauWxAAABDwAAAAdzc2gtcnNhAAABALdnEil8XIFkcgLZgYwS2cIQPHetUzMNxYCqzk7mSfVpCaIYNTr27RG+f+sD0cerdAIUUvhCT7iA82/Y7wzwkO2RUBi61ATfw9DDPPRQTDfix1SSRwbmPB/nVI1HlPMCEs6y48PFaBZqXwJPS3qycgSxoTBhaLCLzT+r6HRaibY7kiRLDeL3/WHyasK2PRdcYJ6KrLd0ctQcUHZCLK3fJfMfuQRg8MZLVrmK3fHStCXHpRFueRxUhZjaiS9evA/NtzEQhf46JDClQ2rLYpSqSg7QUR/rKwqWWyMuQkOHmlJw797VVa+ZzpUFXP7ekWel3FaBj8IHiimIA7Jm6dOCLm4= tests/test_ecdsa_256.key.pub
diff --git a/tests/_support/ed25519.key b/tests/_support/ed25519.key
new file mode 100644
index 0000000..eb9f94c
--- /dev/null
+++ b/tests/_support/ed25519.key
@@ -0,0 +1,8 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACB69SvZKJh/9VgSL0G27b5xVYa8nethH3IERbi0YqJDXwAAAKhjwAdrY8AH
+awAAAAtzc2gtZWQyNTUxOQAAACB69SvZKJh/9VgSL0G27b5xVYa8nethH3IERbi0YqJDXw
+AAAEA9tGQi2IrprbOSbDCF+RmAHd6meNSXBUQ2ekKXm4/8xnr1K9komH/1WBIvQbbtvnFV
+hryd62EfcgRFuLRiokNfAAAAI2FsZXhfZ2F5bm9yQEFsZXhzLU1hY0Jvb2stQWlyLmxvY2
+FsAQI=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/_support/ed25519.key-cert.pub b/tests/_support/ed25519.key-cert.pub
new file mode 100644
index 0000000..4e01415
--- /dev/null
+++ b/tests/_support/ed25519.key-cert.pub
@@ -0,0 +1 @@
+ssh-ed25519-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIjBkc8l1X887CLBHraU+d6/74Hxr9oa+3HC0iioecZ6AAAAIHr1K9komH/1WBIvQbbtvnFVhryd62EfcgRFuLRiokNfAAAAAAAAAAAAAAABAAAACXVzZXJfdGVzdAAAAAgAAAAEdGVzdAAAAAAAAAAA//////////8AAAAAAAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEA7JK+OlJo8Yd8KOz6D0LElUt8bevuT4cJl651bQQZ5DqPsx0cEKPBvLGPi3OqDjswORgkmsCjsKhN9ORiA3wfPy/RCjpAI3TdfIpg48P2LiRQOG5fchYbF6wm9RmTycNKSdB9BInfRJXcjEAIRhOPTFr5xTEJdU4DumFqv4O9cQBYpoHtC71Voo5WzMuzvp7BsUqYpyKa1eUvVYr/Fwv5LvPbWnWX3Gtbsau+dWhnwFhnmK1craVJfFDtIIKh+gWlL5A1O61odp+UR1E74M3Hilp8+cKnfAZ4Ct+1piYwIArRwiUuWUX+qCRo9kFXKnOnI7xBVrArjigeCgI5LWrlsQAAAQ8AAAAHc3NoLXJzYQAAAQCNfYITv/GCW42fLI89x0pKpXIET/xHIBVan5S3fy5SZq9gLG1Db9g/FITDfOVA7OX8mU/91rucHGtuEi3isILdNFrCcoLEml289tyyluUbbFD5fjvBchMWBkYPwrOPfEzSs299Yk8ZgfV1pjWlndfV54s4c9pinkGu8c0Vzc6stEbWkdmoOHE8su3ogUPg/hOygDzJ+ZOgP5HIUJ6YgkgVpWgZm7zofwdZfa2HEb+WhZaKfMK1UCw1UiSBVk9dx6qzF9m243tHwSHraXvb9oJ1wT1S/MypTbP4RT4fHN8koYNrv2szEBN+lkRgk1D7xaxS/Md2TJsau9ho/UCXSR8L tests/test_ed25519.key.pub
diff --git a/tests/_support/ed448.key b/tests/_support/ed448.key
new file mode 100644
index 0000000..887b51c
--- /dev/null
+++ b/tests/_support/ed448.key
@@ -0,0 +1,4 @@
+-----BEGIN PRIVATE KEY-----
+MEcCAQAwBQYDK2VxBDsEOcvcl9IoD0ktR5RWtW84NM7O2e4LmD2cWfRg7Wht/OA9
+POkmRW12VNvlP6BsXKir5yygumIjD91SQQ==
+-----END PRIVATE KEY-----
diff --git a/tests/_support/rsa-lonely.key b/tests/_support/rsa-lonely.key
new file mode 100644
index 0000000..f50e9c5
--- /dev/null
+++ b/tests/_support/rsa-lonely.key
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICWgIBAAKBgQDTj1bqB4WmayWNPB+8jVSYpZYk80Ujvj680pOTh2bORBjbIAyz
+oWGW+GUjzKxTiiPvVmxFgx5wdsFvF03v34lEVVhMpouqPAYQ15N37K/ir5XY+9m/
+d8ufMCkjeXsQkKqFbAlQcnWMCRnOoPHS3I4vi6hmnDDeeYTSRvfLbW0fhwIBIwKB
+gBIiOqZYaoqbeD9OS9z2K9KR2atlTxGxOJPXiP4ESqP3NVScWNwyZ3NXHpyrJLa0
+EbVtzsQhLn6rF+TzXnOlcipFvjsem3iYzCpuChfGQ6SovTcOjHV9z+hnpXvQ/fon
+soVRZY65wKnF7IAoUwTmJS9opqgrN6kRgCd3DASAMd1bAkEA96SBVWFt/fJBNJ9H
+tYnBKZGw0VeHOYmVYbvMSstssn8un+pQpUm9vlG/bp7Oxd/m+b9KWEh2xPfv6zqU
+avNwHwJBANqzGZa/EpzF4J8pGti7oIAPUIDGMtfIcmqNXVMckrmzQ2vTfqtkEZsA
+4rE1IERRyiJQx6EJsz21wJmGV9WJQ5kCQQDwkS0uXqVdFzgHO6S++tjmjYcxwr3g
+H0CoFYSgbddOT6miqRskOQF3DZVkJT3kyuBgU2zKygz52ukQZMqxCb1fAkASvuTv
+qfpH87Qq5kQhNKdbbwbmd2NxlNabazPijWuphGTdW0VfJdWfklyS2Kr+iqrs/5wV
+HhathJt636Eg7oIjAkA8ht3MQ+XSl9yIJIS8gVpbPxSw5OMfw0PjVE7tBdQruiSc
+nvuQES5C9BMHjF39LZiGH1iLQy7FgdHyoP+eodI7
+-----END RSA PRIVATE KEY-----
diff --git a/tests/_support/rsa-missing.key-cert.pub b/tests/_support/rsa-missing.key-cert.pub
new file mode 100644
index 0000000..7487ab6
--- /dev/null
+++ b/tests/_support/rsa-missing.key-cert.pub
@@ -0,0 +1 @@
+ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgsZlXTd5NE4uzGAn6TyAqQj+IPbsTEFGap2x5pTRwQR8AAAABIwAAAIEA049W6geFpmsljTwfvI1UmKWWJPNFI74+vNKTk4dmzkQY2yAMs6FhlvhlI8ysU4oj71ZsRYMecHbBbxdN79+JRFVYTKaLqjwGENeTd+yv4q+V2PvZv3fLnzApI3l7EJCqhWwJUHJ1jAkZzqDx0tyOL4uoZpww3nmE0kb3y21tH4cAAAAAAAAE0gAAAAEAAAAmU2FtcGxlIHNlbGYtc2lnbmVkIE9wZW5TU0ggY2VydGlmaWNhdGUAAAASAAAABXVzZXIxAAAABXVzZXIyAAAAAAAAAAD//////////wAAAAAAAACCAAAAFXBlcm1pdC1YMTEtZm9yd2FyZGluZwAAAAAAAAAXcGVybWl0LWFnZW50LWZvcndhcmRpbmcAAAAAAAAAFnBlcm1pdC1wb3J0LWZvcndhcmRpbmcAAAAAAAAACnBlcm1pdC1wdHkAAAAAAAAADnBlcm1pdC11c2VyLXJjAAAAAAAAAAAAAACVAAAAB3NzaC1yc2EAAAABIwAAAIEA049W6geFpmsljTwfvI1UmKWWJPNFI74+vNKTk4dmzkQY2yAMs6FhlvhlI8ysU4oj71ZsRYMecHbBbxdN79+JRFVYTKaLqjwGENeTd+yv4q+V2PvZv3fLnzApI3l7EJCqhWwJUHJ1jAkZzqDx0tyOL4uoZpww3nmE0kb3y21tH4cAAACPAAAAB3NzaC1yc2EAAACATFHFsARDgQevc6YLxNnDNjsFtZ08KPMyYVx0w5xm95IVZHVWSOc5w+ccjqN9HRwxV3kP7IvL91qx0Uc3MJdB9g/O6HkAP+rpxTVoTb2EAMekwp5+i8nQJW4CN2BSsbQY1M6r7OBZ5nmF4hOW/5Pu4l22lXe2ydy8kEXOEuRpUeQ= test_rsa.key.pub
diff --git a/tests/_support/rsa.key b/tests/_support/rsa.key
new file mode 100644
index 0000000..f50e9c5
--- /dev/null
+++ b/tests/_support/rsa.key
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICWgIBAAKBgQDTj1bqB4WmayWNPB+8jVSYpZYk80Ujvj680pOTh2bORBjbIAyz
+oWGW+GUjzKxTiiPvVmxFgx5wdsFvF03v34lEVVhMpouqPAYQ15N37K/ir5XY+9m/
+d8ufMCkjeXsQkKqFbAlQcnWMCRnOoPHS3I4vi6hmnDDeeYTSRvfLbW0fhwIBIwKB
+gBIiOqZYaoqbeD9OS9z2K9KR2atlTxGxOJPXiP4ESqP3NVScWNwyZ3NXHpyrJLa0
+EbVtzsQhLn6rF+TzXnOlcipFvjsem3iYzCpuChfGQ6SovTcOjHV9z+hnpXvQ/fon
+soVRZY65wKnF7IAoUwTmJS9opqgrN6kRgCd3DASAMd1bAkEA96SBVWFt/fJBNJ9H
+tYnBKZGw0VeHOYmVYbvMSstssn8un+pQpUm9vlG/bp7Oxd/m+b9KWEh2xPfv6zqU
+avNwHwJBANqzGZa/EpzF4J8pGti7oIAPUIDGMtfIcmqNXVMckrmzQ2vTfqtkEZsA
+4rE1IERRyiJQx6EJsz21wJmGV9WJQ5kCQQDwkS0uXqVdFzgHO6S++tjmjYcxwr3g
+H0CoFYSgbddOT6miqRskOQF3DZVkJT3kyuBgU2zKygz52ukQZMqxCb1fAkASvuTv
+qfpH87Qq5kQhNKdbbwbmd2NxlNabazPijWuphGTdW0VfJdWfklyS2Kr+iqrs/5wV
+HhathJt636Eg7oIjAkA8ht3MQ+XSl9yIJIS8gVpbPxSw5OMfw0PjVE7tBdQruiSc
+nvuQES5C9BMHjF39LZiGH1iLQy7FgdHyoP+eodI7
+-----END RSA PRIVATE KEY-----
diff --git a/tests/_support/rsa.key-cert.pub b/tests/_support/rsa.key-cert.pub
new file mode 100644
index 0000000..7487ab6
--- /dev/null
+++ b/tests/_support/rsa.key-cert.pub
@@ -0,0 +1 @@
+ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgsZlXTd5NE4uzGAn6TyAqQj+IPbsTEFGap2x5pTRwQR8AAAABIwAAAIEA049W6geFpmsljTwfvI1UmKWWJPNFI74+vNKTk4dmzkQY2yAMs6FhlvhlI8ysU4oj71ZsRYMecHbBbxdN79+JRFVYTKaLqjwGENeTd+yv4q+V2PvZv3fLnzApI3l7EJCqhWwJUHJ1jAkZzqDx0tyOL4uoZpww3nmE0kb3y21tH4cAAAAAAAAE0gAAAAEAAAAmU2FtcGxlIHNlbGYtc2lnbmVkIE9wZW5TU0ggY2VydGlmaWNhdGUAAAASAAAABXVzZXIxAAAABXVzZXIyAAAAAAAAAAD//////////wAAAAAAAACCAAAAFXBlcm1pdC1YMTEtZm9yd2FyZGluZwAAAAAAAAAXcGVybWl0LWFnZW50LWZvcndhcmRpbmcAAAAAAAAAFnBlcm1pdC1wb3J0LWZvcndhcmRpbmcAAAAAAAAACnBlcm1pdC1wdHkAAAAAAAAADnBlcm1pdC11c2VyLXJjAAAAAAAAAAAAAACVAAAAB3NzaC1yc2EAAAABIwAAAIEA049W6geFpmsljTwfvI1UmKWWJPNFI74+vNKTk4dmzkQY2yAMs6FhlvhlI8ysU4oj71ZsRYMecHbBbxdN79+JRFVYTKaLqjwGENeTd+yv4q+V2PvZv3fLnzApI3l7EJCqhWwJUHJ1jAkZzqDx0tyOL4uoZpww3nmE0kb3y21tH4cAAACPAAAAB3NzaC1yc2EAAACATFHFsARDgQevc6YLxNnDNjsFtZ08KPMyYVx0w5xm95IVZHVWSOc5w+ccjqN9HRwxV3kP7IvL91qx0Uc3MJdB9g/O6HkAP+rpxTVoTb2EAMekwp5+i8nQJW4CN2BSsbQY1M6r7OBZ5nmF4hOW/5Pu4l22lXe2ydy8kEXOEuRpUeQ= test_rsa.key.pub
diff --git a/tests/_util.py b/tests/_util.py
new file mode 100644
index 0000000..f0ae1d4
--- /dev/null
+++ b/tests/_util.py
@@ -0,0 +1,468 @@
+from contextlib import contextmanager
+from os.path import dirname, realpath, join
+import builtins
+import os
+from pathlib import Path
+import socket
+import struct
+import sys
+import unittest
+import time
+import threading
+
+import pytest
+
+from paramiko import (
+ ServerInterface,
+ RSAKey,
+ DSSKey,
+ AUTH_FAILED,
+ AUTH_PARTIALLY_SUCCESSFUL,
+ AUTH_SUCCESSFUL,
+ OPEN_SUCCEEDED,
+ OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
+ InteractiveQuery,
+ Transport,
+)
+from paramiko.ssh_gss import GSS_AUTH_AVAILABLE
+
+from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.asymmetric import padding, rsa
+
+tests_dir = dirname(realpath(__file__))
+
+from ._loop import LoopSocket
+
+
+def _support(filename):
+ base = Path(tests_dir)
+ top = base / filename
+ deeper = base / "_support" / filename
+ return str(deeper if deeper.exists() else top)
+
+
+def _config(name):
+ return join(tests_dir, "configs", name)
+
+
+needs_gssapi = pytest.mark.skipif(
+ not GSS_AUTH_AVAILABLE, reason="No GSSAPI to test"
+)
+
+
+def needs_builtin(name):
+ """
+ Skip decorated test if builtin name does not exist.
+ """
+ reason = "Test requires a builtin '{}'".format(name)
+ return pytest.mark.skipif(not hasattr(builtins, name), reason=reason)
+
+
+slow = pytest.mark.slow
+
+# GSSAPI / Kerberos related tests need a working Kerberos environment.
+# The class `KerberosTestCase` provides such an environment or skips all tests.
+# There are 3 distinct cases:
+#
+# - A Kerberos environment has already been created and the environment
+# contains the required information.
+#
+# - We can use the package 'k5test' to set up a working Kerberos environment on
+# the fly.
+#
+# - We skip all tests.
+#
+# ToDo: add a Windows specific implementation?
+
+if (
+ os.environ.get("K5TEST_USER_PRINC", None)
+ and os.environ.get("K5TEST_HOSTNAME", None)
+ and os.environ.get("KRB5_KTNAME", None)
+): # add other vars as needed
+
+ # The environment provides the required information
+ class DummyK5Realm:
+ def __init__(self):
+ for k in os.environ:
+ if not k.startswith("K5TEST_"):
+ continue
+ setattr(self, k[7:].lower(), os.environ[k])
+ self.env = {}
+
+ class KerberosTestCase(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.realm = DummyK5Realm()
+
+ @classmethod
+ def tearDownClass(cls):
+ del cls.realm
+
+else:
+ try:
+ # Try to setup a kerberos environment
+ from k5test import KerberosTestCase
+ except Exception:
+ # Use a dummy, that skips all tests
+ class KerberosTestCase(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ raise unittest.SkipTest(
+ "Missing extension package k5test. "
+ 'Please run "pip install k5test" '
+ "to install it."
+ )
+
+
+def update_env(testcase, mapping, env=os.environ):
+ """Modify os.environ during a test case and restore during cleanup."""
+ saved_env = env.copy()
+
+ def replace(target, source):
+ target.update(source)
+ for k in list(target):
+ if k not in source:
+ target.pop(k, None)
+
+ testcase.addCleanup(replace, env, saved_env)
+ env.update(mapping)
+ return testcase
+
+
+def k5shell(args=None):
+    """Create a shell with a Kerberos environment.
+
+ This can be used to debug paramiko or to test the old GSSAPI.
+ To test a different GSSAPI, simply activate a suitable venv
+ within the shell.
+ """
+ import k5test
+ import atexit
+ import subprocess
+
+ k5 = k5test.K5Realm()
+ atexit.register(k5.stop)
+ os.environ.update(k5.env)
+ for n in ("realm", "user_princ", "hostname"):
+ os.environ["K5TEST_" + n.upper()] = getattr(k5, n)
+
+ if not args:
+ args = sys.argv[1:]
+ if not args:
+ args = [os.environ.get("SHELL", "bash")]
+ sys.exit(subprocess.call(args))
+
+
+def is_low_entropy():
+ """
+    Attempts to detect whether the running interpreter is low-entropy.
+
+    "Low-entropy" is defined as running in 32-bit mode with the hash seed set
+    to zero.
+ """
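+    # struct.calcsize("P") is the size of a pointer in bytes, so 4 here
+    # means a 32-bit interpreter.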
+ is_32bit = struct.calcsize("P") == 32 / 8
+ # I don't see a way to tell internally if the hash seed was set this
+ # way, but env should be plenty sufficient, this is only for testing.
+ return is_32bit and os.environ.get("PYTHONHASHSEED", None) == "0"
+
+
+def sha1_signing_unsupported():
+ """
+ This is used to skip tests in environments where SHA-1 signing is
+ not supported by the backend.
+ """
+ private_key = rsa.generate_private_key(
+ public_exponent=65537, key_size=2048, backend=default_backend()
+ )
+ message = b"Some dummy text"
+ try:
+ private_key.sign(
+ message,
+ padding.PSS(
+ mgf=padding.MGF1(hashes.SHA1()),
+ salt_length=padding.PSS.MAX_LENGTH,
+ ),
+ hashes.SHA1(),
+ )
+ return False
+ except UnsupportedAlgorithm as e:
+ return e._reason is _Reasons.UNSUPPORTED_HASH
+
+
+requires_sha1_signing = unittest.skipIf(
+ sha1_signing_unsupported(), "SHA-1 signing not supported"
+)
+
+_disable_sha2 = dict(
+ disabled_algorithms=dict(keys=["rsa-sha2-256", "rsa-sha2-512"])
+)
+_disable_sha1 = dict(disabled_algorithms=dict(keys=["ssh-rsa"]))
+_disable_sha2_pubkey = dict(
+ disabled_algorithms=dict(pubkeys=["rsa-sha2-256", "rsa-sha2-512"])
+)
+_disable_sha1_pubkey = dict(disabled_algorithms=dict(pubkeys=["ssh-rsa"]))
+
+
+unicodey = "\u2022"
+
+
+class TestServer(ServerInterface):
+ paranoid_did_password = False
+ paranoid_did_public_key = False
+ # TODO: make this ed25519 or something else modern? (_is_ this used??)
+ paranoid_key = DSSKey.from_private_key_file(_support("dss.key"))
+
+ def __init__(self, allowed_keys=None):
+ self.allowed_keys = allowed_keys if allowed_keys is not None else []
+
+ def check_channel_request(self, kind, chanid):
+ if kind == "bogus":
+ return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
+ return OPEN_SUCCEEDED
+
+ def check_channel_exec_request(self, channel, command):
+ if command != b"yes":
+ return False
+ return True
+
+ def check_channel_shell_request(self, channel):
+ return True
+
+ def check_global_request(self, kind, msg):
+ self._global_request = kind
+ # NOTE: for w/e reason, older impl of this returned False always, even
+ # tho that's only supposed to occur if the request cannot be served.
+ # For now, leaving that the default unless test supplies specific
+ # 'acceptable' request kind
+ return kind == "acceptable"
+
+ def check_channel_x11_request(
+ self,
+ channel,
+ single_connection,
+ auth_protocol,
+ auth_cookie,
+ screen_number,
+ ):
+ self._x11_single_connection = single_connection
+ self._x11_auth_protocol = auth_protocol
+ self._x11_auth_cookie = auth_cookie
+ self._x11_screen_number = screen_number
+ return True
+
+ def check_port_forward_request(self, addr, port):
+ self._listen = socket.socket()
+ self._listen.bind(("127.0.0.1", 0))
+ self._listen.listen(1)
+ return self._listen.getsockname()[1]
+
+ def cancel_port_forward_request(self, addr, port):
+ self._listen.close()
+ self._listen = None
+
+ def check_channel_direct_tcpip_request(self, chanid, origin, destination):
+ self._tcpip_dest = destination
+ return OPEN_SUCCEEDED
+
+ def get_allowed_auths(self, username):
+ if username == "slowdive":
+ return "publickey,password"
+ if username == "paranoid":
+ if (
+ not self.paranoid_did_password
+ and not self.paranoid_did_public_key
+ ):
+ return "publickey,password"
+ elif self.paranoid_did_password:
+ return "publickey"
+ else:
+ return "password"
+ if username == "commie":
+ return "keyboard-interactive"
+ if username == "utf8":
+ return "password"
+ if username == "non-utf8":
+ return "password"
+ return "publickey"
+
+ def check_auth_password(self, username, password):
+ if (username == "slowdive") and (password == "pygmalion"):
+ return AUTH_SUCCESSFUL
+ if (username == "paranoid") and (password == "paranoid"):
+ # 2-part auth (even openssh doesn't support this)
+ self.paranoid_did_password = True
+ if self.paranoid_did_public_key:
+ return AUTH_SUCCESSFUL
+ return AUTH_PARTIALLY_SUCCESSFUL
+ if (username == "utf8") and (password == unicodey):
+ return AUTH_SUCCESSFUL
+ if (username == "non-utf8") and (password == "\xff"):
+ return AUTH_SUCCESSFUL
+ if username == "bad-server":
+ raise Exception("Ack!")
+ if username == "unresponsive-server":
+ time.sleep(5)
+ return AUTH_SUCCESSFUL
+ return AUTH_FAILED
+
+ def check_auth_publickey(self, username, key):
+ if (username == "paranoid") and (key == self.paranoid_key):
+ # 2-part auth
+ self.paranoid_did_public_key = True
+ if self.paranoid_did_password:
+ return AUTH_SUCCESSFUL
+ return AUTH_PARTIALLY_SUCCESSFUL
+ # TODO: make sure all tests incidentally using this to pass, _without
+ # sending a username oops_, get updated somehow - probably via server()
+ # default always injecting a username
+ elif key in self.allowed_keys:
+ return AUTH_SUCCESSFUL
+ return AUTH_FAILED
+
+ def check_auth_interactive(self, username, submethods):
+ if username == "commie":
+ self.username = username
+ return InteractiveQuery(
+ "password", "Please enter a password.", ("Password", False)
+ )
+ return AUTH_FAILED
+
+ def check_auth_interactive_response(self, responses):
+ if self.username == "commie":
+ if (len(responses) == 1) and (responses[0] == "cat"):
+ return AUTH_SUCCESSFUL
+ return AUTH_FAILED
+
+
+@contextmanager
+def server(
+ hostkey=None,
+ init=None,
+ server_init=None,
+ client_init=None,
+ connect=None,
+ pubkeys=None,
+ catch_error=False,
+ transport_factory=None,
+ server_transport_factory=None,
+ defer=False,
+ skip_verify=False,
+):
+ """
+ SSH server contextmanager for testing.
+
+ Yields a tuple of ``(tc, ts)`` (client- and server-side `Transport`
+ objects), or ``(tc, ts, err)`` when ``catch_error==True``.
+
+ :param hostkey:
+ Host key to use for the server; if None, loads
+ ``rsa.key``.
+ :param init:
+ Default `Transport` constructor kwargs to use for both sides.
+ :param server_init:
+ Extends and/or overrides ``init`` for server transport only.
+ :param client_init:
+ Extends and/or overrides ``init`` for client transport only.
+ :param connect:
+ Kwargs to use for ``connect()`` on the client.
+ :param pubkeys:
+ List of public keys for auth.
+ :param catch_error:
+ Whether to capture connection errors & yield from contextmanager.
+ Necessary for connection_time exception testing.
+ :param transport_factory:
+ Like the same-named param in SSHClient: which Transport class to use.
+ :param server_transport_factory:
+ Like ``transport_factory``, but only impacts the server transport.
+ :param bool defer:
+ Whether to defer authentication during connecting.
+
+ This is really just shorthand for ``connect={}`` which would do roughly
+ the same thing. Also: this implies skip_verify=True automatically!
+ :param bool skip_verify:
+ Whether NOT to do the default "make sure auth passed" check.
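+
+    A minimal (hypothetical) usage sketch::
+
+        with server() as (tc, ts):
+            chan = tc.open_session()
+            chan.exec_command("yes")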
+ """
+ if init is None:
+ init = {}
+ if server_init is None:
+ server_init = {}
+ if client_init is None:
+ client_init = {}
+ if connect is None:
+ # No auth at all please
+ if defer:
+ connect = dict()
+ # Default username based auth
+ else:
+ connect = dict(username="slowdive", password="pygmalion")
+ socks = LoopSocket()
+ sockc = LoopSocket()
+ sockc.link(socks)
+ if transport_factory is None:
+ transport_factory = Transport
+ if server_transport_factory is None:
+ server_transport_factory = transport_factory
+ tc = transport_factory(sockc, **dict(init, **client_init))
+ ts = server_transport_factory(socks, **dict(init, **server_init))
+
+ if hostkey is None:
+ hostkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ ts.add_server_key(hostkey)
+ event = threading.Event()
+ server = TestServer(allowed_keys=pubkeys)
+ assert not event.is_set()
+ assert not ts.is_active()
+ assert tc.get_username() is None
+ assert ts.get_username() is None
+ assert not tc.is_authenticated()
+ assert not ts.is_authenticated()
+
+ err = None
+ # Trap errors and yield instead of raising right away; otherwise callers
+ # cannot usefully deal with problems at connect time which stem from errors
+ # in the server side.
+ try:
+ ts.start_server(event, server)
+ tc.connect(**connect)
+
+ event.wait(1.0)
+ assert event.is_set()
+ assert ts.is_active()
+ assert tc.is_active()
+
+ except Exception as e:
+ if not catch_error:
+ raise
+ err = e
+
+ yield (tc, ts, err) if catch_error else (tc, ts)
+
+ if not (catch_error or skip_verify or defer):
+ assert ts.is_authenticated()
+ assert tc.is_authenticated()
+
+ tc.close()
+ ts.close()
+ socks.close()
+ sockc.close()
+
+
+def wait_until(condition, *, timeout=2):
+ """
+ Wait until `condition()` no longer raises an `AssertionError` or until
+ `timeout` seconds have passed, which causes a `TimeoutError` to be raised.
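+
+    A hypothetical example, where ``chan`` is some existing `Channel`::
+
+        def is_closed():
+            assert chan.closed
+
+        wait_until(is_closed, timeout=5)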
+ """
+ deadline = time.time() + timeout
+
+ while True:
+ try:
+ condition()
+ except AssertionError as e:
+ if time.time() > deadline:
+ timeout_message = f"Condition not reached after {timeout}s"
+ raise TimeoutError(timeout_message) from e
+ else:
+ return
+ time.sleep(0.01)
diff --git a/tests/agent.py b/tests/agent.py
new file mode 100644
index 0000000..bcbfb21
--- /dev/null
+++ b/tests/agent.py
@@ -0,0 +1,151 @@
+from unittest.mock import Mock
+
+from pytest import mark, raises
+
+from paramiko import AgentKey, Message, RSAKey
+from paramiko.agent import (
+ SSH2_AGENT_SIGN_RESPONSE,
+ SSH_AGENT_RSA_SHA2_256,
+ SSH_AGENT_RSA_SHA2_512,
+ cSSH2_AGENTC_SIGN_REQUEST,
+)
+
+from ._util import _support
+
+
+# AgentKey with no inner_key
+class _BareAgentKey(AgentKey):
+ def __init__(self, name, blob):
+ self.name = name
+ self.blob = blob
+ self.inner_key = None
+
+
+class AgentKey_:
+ def str_is_repr(self):
+ # Tests for a missed spot in Python 3 upgrades: AgentKey.__str__ was
+ # returning bytes, as if under Python 2. When bug present, this
+ # explodes with "__str__ returned non-string".
+ key = AgentKey(None, b"secret!!!")
+ assert str(key) == repr(key)
+
+ class init:
+ def needs_at_least_two_arguments(self):
+ with raises(TypeError):
+ AgentKey()
+ with raises(TypeError):
+ AgentKey(None)
+
+ def sets_attributes_and_parses_blob(self):
+ agent = Mock()
+ blob = Message()
+ blob.add_string("bad-type")
+ key = AgentKey(agent=agent, blob=bytes(blob))
+ assert key.agent is agent
+ assert key.name == "bad-type"
+ assert key.blob == bytes(blob)
+ assert key.comment == "" # default
+ # TODO: logger testing
+ assert key.inner_key is None # no 'bad-type' algorithm
+
+ def comment_optional(self):
+ blob = Message()
+ blob.add_string("bad-type")
+ key = AgentKey(agent=Mock(), blob=bytes(blob), comment="hi!")
+ assert key.comment == "hi!"
+
+ def sets_inner_key_when_known_type(self, keys):
+ key = AgentKey(agent=Mock(), blob=bytes(keys.pkey))
+ assert key.inner_key == keys.pkey
+
+ class fields:
+ def defaults_to_get_name_and_blob(self):
+ key = _BareAgentKey(name="lol", blob=b"lmao")
+ assert key._fields == ["lol", b"lmao"]
+
+ # TODO: pytest-relaxed is buggy (now?), this shows up under get_bits?
+ def defers_to_inner_key_when_present(self, keys):
+ key = AgentKey(agent=None, blob=keys.pkey.asbytes())
+ assert key._fields == keys.pkey._fields
+ assert key == keys.pkey
+
+ class get_bits:
+ def defaults_to_superclass_implementation(self):
+ # TODO 4.0: assert raises NotImplementedError like changed parent?
+ assert _BareAgentKey(None, None).get_bits() == 0
+
+ def defers_to_inner_key_when_present(self, keys):
+ key = AgentKey(agent=None, blob=keys.pkey.asbytes())
+ assert key.get_bits() == keys.pkey.get_bits()
+
+ class asbytes:
+ def defaults_to_owned_blob(self):
+ blob = Mock()
+ assert _BareAgentKey(name=None, blob=blob).asbytes() is blob
+
+ def defers_to_inner_key_when_present(self, keys):
+ key = AgentKey(agent=None, blob=keys.pkey_with_cert.asbytes())
+ # Artificially make outer key blob != inner key blob; comment in
+ # AgentKey.asbytes implies this can sometimes really happen but I
+ # no longer recall when that could be?
+ key.blob = b"nope"
+ assert key.asbytes() == key.inner_key.asbytes()
+
+ @mark.parametrize(
+ "sign_kwargs,expected_flag",
+ [
+ # No algorithm kwarg: no flags (bitfield -> 0 int)
+ (dict(), 0),
+ (dict(algorithm="rsa-sha2-256"), SSH_AGENT_RSA_SHA2_256),
+ (dict(algorithm="rsa-sha2-512"), SSH_AGENT_RSA_SHA2_512),
+ # TODO: ideally we'd only send these when the key is a cert,
+ # but nothing actually breaks when it isn't; meh. Really, the
+ # parameterization of this test wants rethinking.
+ (
+ dict(algorithm="rsa-sha2-256-cert-v01@openssh.com"),
+ SSH_AGENT_RSA_SHA2_256,
+ ),
+ (
+ dict(algorithm="rsa-sha2-512-cert-v01@openssh.com"),
+ SSH_AGENT_RSA_SHA2_512,
+ ),
+ ],
+ )
+ def signing_data(self, sign_kwargs, expected_flag):
+ class FakeAgent:
+ def _send_message(self, msg):
+ # This message is the thing we actually care about; we're not
+ # testing ssh-agent itself here
+ self._sent_message = msg
+ sig = Message()
+ sig.add_string("lol")
+ sig.rewind()
+ return SSH2_AGENT_SIGN_RESPONSE, sig
+
+ for do_cert in (False, True):
+ agent = FakeAgent()
+ # Get key kinda like how a real agent would give it to us - if
+ # cert, it'd be the entire public blob, not just the pubkey. This
+ # ensures the code under test sends _just the pubkey part_ back to
+ # the agent during signature requests (bug was us sending _the
+ # entire cert blob_, which somehow "worked ok" but always got us
+ # SHA1)
+ # NOTE: using lower level loader to avoid auto-cert-load when
+ # testing regular key (agents expose them separately)
+ inner_key = RSAKey.from_private_key_file(_support("rsa.key"))
+ blobby = inner_key.asbytes()
+ # NOTE: expected key blob always wants to be the real key, even
+ # when the "key" is a certificate.
+ expected_request_key_blob = blobby
+ if do_cert:
+ inner_key.load_certificate(_support("rsa.key-cert.pub"))
+ blobby = inner_key.public_blob.key_blob
+ key = AgentKey(agent, blobby)
+ result = key.sign_ssh_data(b"data-to-sign", **sign_kwargs)
+ assert result == b"lol"
+ msg = agent._sent_message
+ msg.rewind()
+ assert msg.get_byte() == cSSH2_AGENTC_SIGN_REQUEST
+ assert msg.get_string() == expected_request_key_blob
+ assert msg.get_string() == b"data-to-sign"
+ assert msg.get_int() == expected_flag
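For reference, the wire framing those final assertions verify can be reproduced with paramiko.Message directly. This is only a sketch of the request layout (type byte, bare public-key blob, data, flags bitfield); the blob below is a placeholder, not a real key.

from paramiko.agent import SSH_AGENT_RSA_SHA2_256, cSSH2_AGENTC_SIGN_REQUEST
from paramiko.message import Message

msg = Message()
msg.add_byte(cSSH2_AGENTC_SIGN_REQUEST)         # request type
msg.add_string(b"placeholder-public-key-blob")  # bare key blob, never the full cert
msg.add_string(b"data-to-sign")                 # payload the agent should sign
msg.add_int(SSH_AGENT_RSA_SHA2_256)             # flags selecting rsa-sha2-256

msg.rewind()
assert msg.get_byte() == cSSH2_AGENTC_SIGN_REQUEST
assert msg.get_string() == b"placeholder-public-key-blob"
assert msg.get_string() == b"data-to-sign"
assert msg.get_int() == SSH_AGENT_RSA_SHA2_256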
diff --git a/tests/auth.py b/tests/auth.py
new file mode 100644
index 0000000..c0afe88
--- /dev/null
+++ b/tests/auth.py
@@ -0,0 +1,580 @@
+"""
+Tests focusing primarily on the authentication step.
+
+Thus, they concern AuthHandler and AuthStrategy, with a side of Transport.
+"""
+
+from logging import Logger
+from unittest.mock import Mock
+
+from pytest import raises
+
+from paramiko import (
+ AgentKey,
+ AuthenticationException,
+ AuthFailure,
+ AuthResult,
+ AuthSource,
+ AuthStrategy,
+ BadAuthenticationType,
+ DSSKey,
+ InMemoryPrivateKey,
+ NoneAuth,
+ OnDiskPrivateKey,
+ Password,
+ PrivateKey,
+ PKey,
+ RSAKey,
+ SSHException,
+ ServiceRequestingTransport,
+ SourceResult,
+)
+
+from ._util import (
+ _disable_sha1_pubkey,
+ _disable_sha2,
+ _disable_sha2_pubkey,
+ _support,
+ requires_sha1_signing,
+ server,
+ unicodey,
+)
+
+
+class AuthHandler_:
+ """
+ Most of these tests are explicit about the auth method they call.
+
+ This is because not too many other tests do so (they rely on the implicit
+ auth trigger of various connect() kwargs).
+ """
+
+ def bad_auth_type(self):
+ """
+ verify that we get the right exception when an unsupported auth
+ type is requested.
+ """
+ # Server won't allow password auth for this user, so should fail
+ # and return just publickey allowed types
+ with server(
+ connect=dict(username="unknown", password="error"),
+ catch_error=True,
+ ) as (_, _, err):
+ assert isinstance(err, BadAuthenticationType)
+ assert err.allowed_types == ["publickey"]
+
+ def bad_password(self):
+ """
+ verify that a bad password gets the right exception, and that a retry
+ with the right password works.
+ """
+ # NOTE: Transport.connect doesn't do any auth upfront if no userauth
+ # related kwargs given.
+ with server(defer=True) as (tc, ts):
+ # Auth once, badly
+ with raises(AuthenticationException):
+ tc.auth_password(username="slowdive", password="error")
+ # And again, correctly
+ tc.auth_password(username="slowdive", password="pygmalion")
+
+ def multipart_auth(self):
+ """
+ verify that multipart auth works.
+ """
+ with server(defer=True) as (tc, ts):
+ assert tc.auth_password(
+ username="paranoid", password="paranoid"
+ ) == ["publickey"]
+ key = DSSKey.from_private_key_file(_support("dss.key"))
+ assert tc.auth_publickey(username="paranoid", key=key) == []
+
+ def interactive_auth(self):
+ """
+ verify keyboard-interactive auth works.
+ """
+
+ def handler(title, instructions, prompts):
+ self.got_title = title
+ self.got_instructions = instructions
+ self.got_prompts = prompts
+ return ["cat"]
+
+ with server(defer=True) as (tc, ts):
+ assert tc.auth_interactive("commie", handler) == []
+ assert self.got_title == "password"
+ assert self.got_prompts == [("Password", False)]
+
+ def interactive_fallback(self):
+ """
+ verify that a password auth attempt will fall back to "interactive"
+ if password auth isn't supported but interactive is.
+ """
+ with server(defer=True) as (tc, ts):
+ # This username results in an allowed_auth of just kbd-int,
+ # and has a configured interactive->response on the server.
+ assert tc.auth_password("commie", "cat") == []
+
+ def utf8(self):
+ """
+ verify that utf-8 encoding happens in authentication.
+ """
+ with server(defer=True) as (tc, ts):
+ assert tc.auth_password("utf8", unicodey) == []
+
+ def non_utf8(self):
+ """
+ verify that non-utf-8 encoded passwords can be used for broken
+ servers.
+ """
+ with server(defer=True) as (tc, ts):
+ assert tc.auth_password("non-utf8", "\xff") == []
+
+ def auth_exception_when_disconnected(self):
+ """
+ verify that we catch a server disconnecting during auth, and report
+ it as an auth failure.
+ """
+ with server(defer=True, skip_verify=True) as (tc, ts), raises(
+ AuthenticationException
+ ):
+ tc.auth_password("bad-server", "hello")
+
+ def non_responsive_triggers_auth_exception(self):
+ """
+ verify that authentication times out if the server takes too long to
+ respond (or never responds).
+ """
+ with server(defer=True, skip_verify=True) as (tc, ts), raises(
+ AuthenticationException
+ ) as info:
+ tc.auth_timeout = 1 # 1 second, to speed up test
+ tc.auth_password("unresponsive-server", "hello")
+ assert "Authentication timeout" in str(info.value)
+
+
+class AuthOnlyHandler_:
+ def _server(self, *args, **kwargs):
+ kwargs.setdefault("transport_factory", ServiceRequestingTransport)
+ return server(*args, **kwargs)
+
+ class fallback_pubkey_algorithm:
+ @requires_sha1_signing
+ def key_type_algo_selected_when_no_server_sig_algs(self):
+ privkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ # Server pretending to be an apparently common setup:
+ # - doesn't support (or have enabled) sha2
+ # - also doesn't support (or have enabled) server-sig-algs/ext-info
+ # This is the scenario in which Paramiko has to guess-the-algo, and
+ # where servers that don't support sha2 or server-sig-algs can give
+ # us trouble.
+ server_init = dict(_disable_sha2_pubkey, server_sig_algs=False)
+ with self._server(
+ pubkeys=[privkey],
+ connect=dict(pkey=privkey),
+ server_init=server_init,
+ catch_error=True,
+ ) as (tc, ts, err):
+ # Auth did work
+ assert tc.is_authenticated()
+ # Selected ssh-rsa, instead of first-in-the-list (rsa-sha2-512)
+ assert tc._agreed_pubkey_algorithm == "ssh-rsa"
+
+ @requires_sha1_signing
+ def key_type_algo_selection_is_cert_suffix_aware(self):
+ # This key has a cert next to it, which should trigger cert-aware
+ # loading within key classes.
+ privkey = PKey.from_path(_support("rsa.key"))
+ server_init = dict(_disable_sha2_pubkey, server_sig_algs=False)
+ with self._server(
+ pubkeys=[privkey],
+ connect=dict(pkey=privkey),
+ server_init=server_init,
+ catch_error=True,
+ ) as (tc, ts, err):
+ assert not err
+ # Auth did work
+ assert tc.is_authenticated()
+ # Selected expected cert type
+ assert (
+ tc._agreed_pubkey_algorithm
+ == "ssh-rsa-cert-v01@openssh.com"
+ )
+
+ @requires_sha1_signing
+ def uses_first_preferred_algo_if_key_type_not_in_list(self):
+ # This is functionally the same as legacy AuthHandler, just
+ # arriving at the same place in a different manner.
+ privkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ server_init = dict(_disable_sha2_pubkey, server_sig_algs=False)
+ with self._server(
+ pubkeys=[privkey],
+ connect=dict(pkey=privkey),
+ server_init=server_init,
+ client_init=_disable_sha1_pubkey, # no ssh-rsa
+ catch_error=True,
+ ) as (tc, ts, err):
+ assert not tc.is_authenticated()
+ assert isinstance(err, AuthenticationException)
+ assert tc._agreed_pubkey_algorithm == "rsa-sha2-512"
+
+
+class SHA2SignaturePubkeys:
+ def pubkey_auth_honors_disabled_algorithms(self):
+ privkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ with server(
+ pubkeys=[privkey],
+ connect=dict(pkey=privkey),
+ init=dict(
+ disabled_algorithms=dict(
+ pubkeys=["ssh-rsa", "rsa-sha2-256", "rsa-sha2-512"]
+ )
+ ),
+ catch_error=True,
+ ) as (_, _, err):
+ assert isinstance(err, SSHException)
+ assert "no RSA pubkey algorithms" in str(err)
+
+ def client_sha2_disabled_server_sha1_disabled_no_match(self):
+ privkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ with server(
+ pubkeys=[privkey],
+ connect=dict(pkey=privkey),
+ client_init=_disable_sha2_pubkey,
+ server_init=_disable_sha1_pubkey,
+ catch_error=True,
+ ) as (tc, ts, err):
+ assert isinstance(err, AuthenticationException)
+
+ def client_sha1_disabled_server_sha2_disabled_no_match(self):
+ privkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ with server(
+ pubkeys=[privkey],
+ connect=dict(pkey=privkey),
+ client_init=_disable_sha1_pubkey,
+ server_init=_disable_sha2_pubkey,
+ catch_error=True,
+ ) as (tc, ts, err):
+ assert isinstance(err, AuthenticationException)
+
+ @requires_sha1_signing
+ def ssh_rsa_still_used_when_sha2_disabled(self):
+ privkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ # NOTE: this works because key obj comparison uses public bytes
+ # TODO: would be nice for PKey to grow a legit "give me another obj of
+ # same class but just the public bits" using asbytes()
+ with server(
+ pubkeys=[privkey], connect=dict(pkey=privkey), init=_disable_sha2
+ ) as (tc, _):
+ assert tc.is_authenticated()
+
+ @requires_sha1_signing
+ def first_client_preferred_algo_used_when_no_server_sig_algs(self):
+ privkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ # Server pretending to be an apparently common setup:
+ # - doesn't support (or have enabled) sha2
+ # - also doesn't support (or have enabled) server-sig-algs/ext-info
+ # This is the scenario in which Paramiko has to guess-the-algo, and
+ # where servers that don't support sha2 or server-sig-algs give us
+ # trouble.
+ server_init = dict(_disable_sha2_pubkey, server_sig_algs=False)
+ with server(
+ pubkeys=[privkey],
+ connect=dict(username="slowdive", pkey=privkey),
+ server_init=server_init,
+ catch_error=True,
+ ) as (tc, ts, err):
+ assert not tc.is_authenticated()
+ assert isinstance(err, AuthenticationException)
+ # Oh no! this isn't ssh-rsa, and our server doesn't support sha2!
+ assert tc._agreed_pubkey_algorithm == "rsa-sha2-512"
+
+ def sha2_512(self):
+ privkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ with server(
+ pubkeys=[privkey],
+ connect=dict(pkey=privkey),
+ init=dict(
+ disabled_algorithms=dict(pubkeys=["ssh-rsa", "rsa-sha2-256"])
+ ),
+ ) as (tc, ts):
+ assert tc.is_authenticated()
+ assert tc._agreed_pubkey_algorithm == "rsa-sha2-512"
+
+ def sha2_256(self):
+ privkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ with server(
+ pubkeys=[privkey],
+ connect=dict(pkey=privkey),
+ init=dict(
+ disabled_algorithms=dict(pubkeys=["ssh-rsa", "rsa-sha2-512"])
+ ),
+ ) as (tc, ts):
+ assert tc.is_authenticated()
+ assert tc._agreed_pubkey_algorithm == "rsa-sha2-256"
+
+ def sha2_256_when_client_only_enables_256(self):
+ privkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ with server(
+ pubkeys=[privkey],
+ connect=dict(pkey=privkey),
+ # Client-side only; server still accepts all 3.
+ client_init=dict(
+ disabled_algorithms=dict(pubkeys=["ssh-rsa", "rsa-sha2-512"])
+ ),
+ ) as (tc, ts):
+ assert tc.is_authenticated()
+ assert tc._agreed_pubkey_algorithm == "rsa-sha2-256"
+
+
+class AuthSource_:
+ class base_class:
+ def init_requires_and_saves_username(self):
+ with raises(TypeError):
+ AuthSource()
+ assert AuthSource(username="foo").username == "foo"
+
+ def dunder_repr_delegates_to_helper(self):
+ source = AuthSource("foo")
+ source._repr = Mock(wraps=lambda: "whatever")
+ repr(source)
+ source._repr.assert_called_once_with()
+
+ def repr_helper_prints_basic_kv_pairs(self):
+ assert repr(AuthSource("foo")) == "AuthSource()"
+ assert (
+ AuthSource("foo")._repr(bar="open") == "AuthSource(bar='open')"
+ )
+
+ def authenticate_takes_transport_and_is_abstract(self):
+ # TODO: this test kinda just goes away once we're typed?
+ with raises(TypeError):
+ AuthSource("foo").authenticate()
+ with raises(NotImplementedError):
+ AuthSource("foo").authenticate(None)
+
+ class NoneAuth_:
+ def authenticate_auths_none(self):
+ trans = Mock()
+ result = NoneAuth("foo").authenticate(trans)
+ trans.auth_none.assert_called_once_with("foo")
+ assert result is trans.auth_none.return_value
+
+ def repr_shows_class(self):
+ assert repr(NoneAuth("foo")) == "NoneAuth()"
+
+ class Password_:
+ def init_takes_and_stores_password_getter(self):
+ with raises(TypeError):
+ Password("foo")
+ getter = Mock()
+ pw = Password("foo", password_getter=getter)
+ assert pw.password_getter is getter
+
+ def repr_adds_username(self):
+ pw = Password("foo", password_getter=Mock())
+ assert repr(pw) == "Password(user='foo')"
+
+ def authenticate_gets_and_supplies_password(self):
+ getter = Mock(return_value="bar")
+ trans = Mock()
+ pw = Password("foo", password_getter=getter)
+ result = pw.authenticate(trans)
+ trans.auth_password.assert_called_once_with("foo", "bar")
+ assert result is trans.auth_password.return_value
+
+ class PrivateKey_:
+ def authenticate_calls_publickey_with_pkey(self):
+ source = PrivateKey(username="foo")
+ source.pkey = Mock() # set by subclasses
+ trans = Mock()
+ result = source.authenticate(trans)
+ trans.auth_publickey.assert_called_once_with("foo", source.pkey)
+ assert result is trans.auth_publickey.return_value
+
+ class InMemoryPrivateKey_:
+ def init_takes_pkey_object(self):
+ with raises(TypeError):
+ InMemoryPrivateKey("foo")
+ pkey = Mock()
+ source = InMemoryPrivateKey(username="foo", pkey=pkey)
+ assert source.pkey is pkey
+
+ def repr_shows_pkey_repr(self):
+ pkey = PKey.from_path(_support("ed25519.key"))
+ source = InMemoryPrivateKey("foo", pkey)
+ assert (
+ repr(source)
+ == "InMemoryPrivateKey(pkey=PKey(alg=ED25519, bits=256, fp=SHA256:J6VESFdD3xSChn8y9PzWzeF+1tl892mOy2TqkMLO4ow))" # noqa
+ )
+
+ def repr_appends_agent_flag_when_AgentKey(self):
+ real_key = PKey.from_path(_support("ed25519.key"))
+ pkey = AgentKey(agent=None, blob=bytes(real_key))
+ source = InMemoryPrivateKey("foo", pkey)
+ assert (
+ repr(source)
+ == "InMemoryPrivateKey(pkey=PKey(alg=ED25519, bits=256, fp=SHA256:J6VESFdD3xSChn8y9PzWzeF+1tl892mOy2TqkMLO4ow)) [agent]" # noqa
+ )
+
+ class OnDiskPrivateKey_:
+ def init_takes_source_path_and_pkey(self):
+ with raises(TypeError):
+ OnDiskPrivateKey("foo")
+ with raises(TypeError):
+ OnDiskPrivateKey("foo", "bar")
+ with raises(TypeError):
+ OnDiskPrivateKey("foo", "bar", "biz")
+ source = OnDiskPrivateKey(
+ username="foo",
+ source="ssh-config",
+ path="of-exile",
+ pkey="notreally",
+ )
+ assert source.username == "foo"
+ assert source.source == "ssh-config"
+ assert source.path == "of-exile"
+ assert source.pkey == "notreally"
+
+ def init_requires_specific_value_for_source(self):
+ with raises(
+ ValueError,
+ match=r"source argument must be one of: \('ssh-config', 'python-config', 'implicit-home'\)", # noqa
+ ):
+ OnDiskPrivateKey("foo", source="what?", path="meh", pkey="no")
+
+ def repr_reflects_source_path_and_pkey(self):
+ source = OnDiskPrivateKey(
+ username="foo",
+ source="ssh-config",
+ path="of-exile",
+ pkey="notreally",
+ )
+ assert (
+ repr(source)
+ == "OnDiskPrivateKey(key='notreally', source='ssh-config', path='of-exile')" # noqa
+ )
+
+
+class AuthResult_:
+ def setup_method(self):
+ self.strat = AuthStrategy(None)
+
+ def acts_like_list_with_strategy_attribute(self):
+ with raises(TypeError):
+ AuthResult()
+ # kwarg works by itself
+ AuthResult(strategy=self.strat)
+ # or can be given as posarg w/ regular list() args after
+ result = AuthResult(self.strat, [1, 2, 3])
+ assert result.strategy is self.strat
+ assert result == [1, 2, 3]
+ assert isinstance(result, list)
+
+ def repr_is_list_repr_untouched(self):
+ result = AuthResult(self.strat, [1, 2, 3])
+ assert repr(result) == "[1, 2, 3]"
+
+ class dunder_str:
+ def is_multiline_display_of_sourceresult_tuples(self):
+ result = AuthResult(self.strat)
+ result.append(SourceResult("foo", "bar"))
+ result.append(SourceResult("biz", "baz"))
+ assert str(result) == "foo -> bar\nbiz -> baz"
+
+ def shows_str_not_repr_of_auth_source_and_result(self):
+ result = AuthResult(self.strat)
+ result.append(
+ SourceResult(NoneAuth("foo"), ["password", "pubkey"])
+ )
+ assert str(result) == "NoneAuth() -> ['password', 'pubkey']"
+
+ def empty_list_result_values_show_success_string(self):
+ result = AuthResult(self.strat)
+ result.append(SourceResult(NoneAuth("foo"), []))
+ assert str(result) == "NoneAuth() -> success"
+
+
+class AuthFailure_:
+ def is_an_AuthenticationException(self):
+ assert isinstance(AuthFailure(None), AuthenticationException)
+
+ def init_requires_result(self):
+ with raises(TypeError):
+ AuthFailure()
+ result = AuthResult(None)
+ fail = AuthFailure(result=result)
+ assert fail.result is result
+
+ def str_is_newline_plus_result_str(self):
+ result = AuthResult(None)
+ result.append(SourceResult(NoneAuth("foo"), Exception("onoz")))
+ fail = AuthFailure(result)
+ assert str(fail) == "\nNoneAuth() -> onoz"
+
+
+class AuthStrategy_:
+ def init_requires_ssh_config_param_and_sets_up_a_logger(self):
+ with raises(TypeError):
+ AuthStrategy()
+ conf = object()
+ strat = AuthStrategy(ssh_config=conf)
+ assert strat.ssh_config is conf
+ assert isinstance(strat.log, Logger)
+ assert strat.log.name == "paramiko.auth_strategy"
+
+ def get_sources_is_abstract(self):
+ with raises(NotImplementedError):
+ AuthStrategy(None).get_sources()
+
+ class authenticate:
+ def setup_method(self):
+ self.strat = AuthStrategy(None) # ssh_config not used directly
+ self.source, self.transport = NoneAuth(None), Mock()
+ self.source.authenticate = Mock()
+ self.strat.get_sources = Mock(return_value=[self.source])
+
+ def requires_and_uses_transport_with_methods_returning_result(self):
+ with raises(TypeError):
+ self.strat.authenticate()
+ result = self.strat.authenticate(self.transport)
+ self.strat.get_sources.assert_called_once_with()
+ self.source.authenticate.assert_called_once_with(self.transport)
+ assert isinstance(result, AuthResult)
+ assert result.strategy is self.strat
+ assert len(result) == 1
+ source_res = result[0]
+ assert isinstance(source_res, SourceResult)
+ assert source_res.source is self.source
+ assert source_res.result is self.source.authenticate.return_value
+
+ def logs_sources_attempted(self):
+ self.strat.log = Mock()
+ self.strat.authenticate(self.transport)
+ self.strat.log.debug.assert_called_once_with("Trying NoneAuth()")
+
+ def raises_AuthFailure_if_no_successes(self):
+ self.strat.log = Mock()
+ oops = Exception("onoz")
+ self.source.authenticate.side_effect = oops
+ with raises(AuthFailure) as info:
+ self.strat.authenticate(self.transport)
+ result = info.value.result
+ assert isinstance(result, AuthResult)
+ assert len(result) == 1
+ source_res = result[0]
+ assert isinstance(source_res, SourceResult)
+ assert source_res.source is self.source
+ assert source_res.result is oops
+ self.strat.log.info.assert_called_once_with(
+ "Authentication via NoneAuth() failed with Exception"
+ )
+
+ def short_circuits_on_successful_auth(self):
+ kaboom = Mock(authenticate=Mock(side_effect=Exception("onoz")))
+ self.strat.get_sources.return_value = [self.source, kaboom]
+ result = self.strat.authenticate(self.transport)
+ # No exception, and it's just a regular ol Result
+ assert isinstance(result, AuthResult)
+ # And it did not capture any attempt to execute the 2nd source
+ assert len(result) == 1
+ assert result[0].source is self.source
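The AuthStrategy tests above outline the contract a concrete strategy must satisfy: implement get_sources(), let the base class's authenticate() walk the sources in order, and receive an AuthResult back (or an AuthFailure carrying one). Below is a minimal sketch of a custom strategy; the class name, the pre-loaded PKey, and the password callback are all hypothetical. An object like this can typically be handed to SSHClient.connect via its auth_strategy keyword in Paramiko 3.x.

from paramiko import AuthStrategy, InMemoryPrivateKey, Password

class PasswordThenKey(AuthStrategy):
    # Try a password first, then fall back to an in-memory private key.
    def __init__(self, ssh_config, username, pkey, password_getter):
        super().__init__(ssh_config=ssh_config)
        self.username = username
        self.pkey = pkey
        self.password_getter = password_getter

    def get_sources(self):
        # authenticate() iterates these in order and stops at the first
        # source whose authenticate() does not raise.
        yield Password(self.username, password_getter=self.password_getter)
        yield InMemoryPrivateKey(self.username, self.pkey)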
diff --git a/tests/badhash_key1.ed25519.key b/tests/badhash_key1.ed25519.key
new file mode 100644
index 0000000..3e33781
--- /dev/null
+++ b/tests/badhash_key1.ed25519.key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZWQyNTUx
+OQAAACCULQjdmVfwpbDAFYz4mhKo6aCiAfkbaC+dEdq5eP1R9QAAAIjXZhzv12Yc7wAAAAtzc2gt
+ZWQyNTUxOQAAACCULQjdmVfwpbDAFYz4mhKo6aCiAfkbaC+dEdq5eP1R9QAAAEByeJbhZUBL2aJ6
+wP85amzQuqDJRrNyAGMtDBJ43SURxpQtCN2ZV/ClsMAVjPiaEqjpoKIB+RtoL50R2rl4/VH1AAAA
+AAECAwQF
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/badhash_key2.ed25519.key b/tests/badhash_key2.ed25519.key
new file mode 100644
index 0000000..bf48eda
--- /dev/null
+++ b/tests/badhash_key2.ed25519.key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZWQyNTUx
+OQAAACACJbNmFu2Bk34HArxhiRYajoIN03Vr0umfNvsc9atE0AAAAIi5+po1ufqaNQAAAAtzc2gt
+ZWQyNTUxOQAAACACJbNmFu2Bk34HArxhiRYajoIN03Vr0umfNvsc9atE0AAAAECh/ZzZJDOZGnil
+BxJMm+nOhBpc07IVBjU1ii+S8zqFaAIls2YW7YGTfgcCvGGJFhqOgg3TdWvS6Z82+xz1q0TQAAAA
+AAECAwQF
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/blank_rsa.key b/tests/blank_rsa.key
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/blank_rsa.key
diff --git a/tests/configs/basic b/tests/configs/basic
new file mode 100644
index 0000000..93fe3be
--- /dev/null
+++ b/tests/configs/basic
@@ -0,0 +1,4 @@
+CanonicalDomains paramiko.org
+
+Host www.paramiko.org
+ User rando
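As a rough sketch of how these config fixtures are consumed elsewhere in the suite, a lookup against the file above via paramiko's SSHConfig might look as follows (the relative path is an assumption):

from paramiko import SSHConfig

config = SSHConfig.from_path("tests/configs/basic")
info = config.lookup("www.paramiko.org")
assert info["user"] == "rando"                 # from the matching Host block
assert info["hostname"] == "www.paramiko.org"  # no canonicalization without CanonicalizeHostname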
diff --git a/tests/configs/canon b/tests/configs/canon
new file mode 100644
index 0000000..7b97940
--- /dev/null
+++ b/tests/configs/canon
@@ -0,0 +1,8 @@
+CanonicalizeHostname yes
+CanonicalDomains paramiko.org
+
+IdentityFile base.key
+
+Host www.paramiko.org
+ User rando
+ IdentityFile canonicalized.key
diff --git a/tests/configs/canon-always b/tests/configs/canon-always
new file mode 100644
index 0000000..f3f56b7
--- /dev/null
+++ b/tests/configs/canon-always
@@ -0,0 +1,5 @@
+CanonicalDomains paramiko.org
+CanonicalizeHostname always
+
+Host www.paramiko.org
+ User rando
diff --git a/tests/configs/canon-ipv4 b/tests/configs/canon-ipv4
new file mode 100644
index 0000000..92c3875
--- /dev/null
+++ b/tests/configs/canon-ipv4
@@ -0,0 +1,6 @@
+CanonicalDomains paramiko.org
+CanonicalizeHostname yes
+AddressFamily inet
+
+Host www.paramiko.org
+ User rando
diff --git a/tests/configs/canon-local b/tests/configs/canon-local
new file mode 100644
index 0000000..dde9f77
--- /dev/null
+++ b/tests/configs/canon-local
@@ -0,0 +1,6 @@
+Host www.paramiko.org
+ User rando
+
+Host www
+ CanonicalDomains paramiko.org
+ CanonicalizeHostname yes
diff --git a/tests/configs/canon-local-always b/tests/configs/canon-local-always
new file mode 100644
index 0000000..0ad0535
--- /dev/null
+++ b/tests/configs/canon-local-always
@@ -0,0 +1,6 @@
+Host www.paramiko.org
+ User rando
+
+Host www
+ CanonicalDomains paramiko.org
+ CanonicalizeHostname always
diff --git a/tests/configs/deep-canon b/tests/configs/deep-canon
new file mode 100644
index 0000000..483823d
--- /dev/null
+++ b/tests/configs/deep-canon
@@ -0,0 +1,11 @@
+CanonicalizeHostname yes
+CanonicalDomains paramiko.org
+
+Host www.paramiko.org
+ User rando
+
+Host sub.www.paramiko.org
+ User deep
+
+Host subber.sub.www.paramiko.org
+ User deeper
diff --git a/tests/configs/deep-canon-maxdots b/tests/configs/deep-canon-maxdots
new file mode 100644
index 0000000..7785f66
--- /dev/null
+++ b/tests/configs/deep-canon-maxdots
@@ -0,0 +1,12 @@
+CanonicalizeHostname yes
+CanonicalDomains paramiko.org
+CanonicalizeMaxDots 2
+
+Host www.paramiko.org
+ User rando
+
+Host sub.www.paramiko.org
+ User deep
+
+Host subber.sub.www.paramiko.org
+ User deeper
diff --git a/tests/configs/empty-canon b/tests/configs/empty-canon
new file mode 100644
index 0000000..19743ad
--- /dev/null
+++ b/tests/configs/empty-canon
@@ -0,0 +1,6 @@
+CanonicalizeHostname yes
+CanonicalDomains
+AddressFamily inet
+
+Host www.paramiko.org
+ User rando
diff --git a/tests/configs/fallback-no b/tests/configs/fallback-no
new file mode 100644
index 0000000..ec8d13e
--- /dev/null
+++ b/tests/configs/fallback-no
@@ -0,0 +1,6 @@
+CanonicalizeHostname yes
+CanonicalDomains paramiko.org
+CanonicalizeFallbackLocal no
+
+Host www.paramiko.org
+ User rando
diff --git a/tests/configs/fallback-yes b/tests/configs/fallback-yes
new file mode 100644
index 0000000..bc4f4ee
--- /dev/null
+++ b/tests/configs/fallback-yes
@@ -0,0 +1,6 @@
+CanonicalizeHostname yes
+CanonicalDomains paramiko.org
+CanonicalizeFallbackLocal yes
+
+Host www.paramiko.org
+ User rando
diff --git a/tests/configs/hostname-exec-tokenized b/tests/configs/hostname-exec-tokenized
new file mode 100644
index 0000000..1cae2c0
--- /dev/null
+++ b/tests/configs/hostname-exec-tokenized
@@ -0,0 +1,2 @@
+Match exec "ping %h"
+ HostName pingable.%h
diff --git a/tests/configs/hostname-tokenized b/tests/configs/hostname-tokenized
new file mode 100644
index 0000000..1905c0c
--- /dev/null
+++ b/tests/configs/hostname-tokenized
@@ -0,0 +1 @@
+HostName prefix.%h
diff --git a/tests/configs/invalid b/tests/configs/invalid
new file mode 100644
index 0000000..81332fe
--- /dev/null
+++ b/tests/configs/invalid
@@ -0,0 +1 @@
+lolwut
diff --git a/tests/configs/match-all b/tests/configs/match-all
new file mode 100644
index 0000000..7673e0a
--- /dev/null
+++ b/tests/configs/match-all
@@ -0,0 +1,2 @@
+Match all
+ User awesome
diff --git a/tests/configs/match-all-after-canonical b/tests/configs/match-all-after-canonical
new file mode 100644
index 0000000..531112c
--- /dev/null
+++ b/tests/configs/match-all-after-canonical
@@ -0,0 +1,5 @@
+CanonicalizeHostname yes
+CanonicalDomains paramiko.org
+
+Match canonical all
+ User awesome
diff --git a/tests/configs/match-all-and-more b/tests/configs/match-all-and-more
new file mode 100644
index 0000000..bb50696
--- /dev/null
+++ b/tests/configs/match-all-and-more
@@ -0,0 +1,2 @@
+Match all exec "lol nope"
+ HostName whatever
diff --git a/tests/configs/match-all-and-more-before b/tests/configs/match-all-and-more-before
new file mode 100644
index 0000000..4d5b2e3
--- /dev/null
+++ b/tests/configs/match-all-and-more-before
@@ -0,0 +1,2 @@
+Match exec "lol nope" all
+ HostName whatever
diff --git a/tests/configs/match-all-before-canonical b/tests/configs/match-all-before-canonical
new file mode 100644
index 0000000..35e3b0e
--- /dev/null
+++ b/tests/configs/match-all-before-canonical
@@ -0,0 +1,5 @@
+CanonicalizeHostname yes
+CanonicalDomains paramiko.org
+
+Match all canonical
+ User oops
diff --git a/tests/configs/match-canonical-no b/tests/configs/match-canonical-no
new file mode 100644
index 0000000..e528dc6
--- /dev/null
+++ b/tests/configs/match-canonical-no
@@ -0,0 +1,7 @@
+CanonicalizeHostname no
+
+Match canonical all
+ User awesome
+
+Match !canonical host specific
+ User overload
diff --git a/tests/configs/match-canonical-yes b/tests/configs/match-canonical-yes
new file mode 100644
index 0000000..d6c2092
--- /dev/null
+++ b/tests/configs/match-canonical-yes
@@ -0,0 +1,5 @@
+CanonicalizeHostname yes
+CanonicalDomains paramiko.org
+
+Match !canonical host www*
+ User hidden
diff --git a/tests/configs/match-complex b/tests/configs/match-complex
new file mode 100644
index 0000000..6363403
--- /dev/null
+++ b/tests/configs/match-complex
@@ -0,0 +1,17 @@
+HostName bogus
+
+Match originalhost target host bogus
+ User rand
+
+Match originalhost remote localuser rando
+ User calrissian
+
+# Just to set user for subsequent match
+Match originalhost www
+ User calrissian
+
+Match !canonical originalhost www host bogus localuser rando user calrissian
+ Port 7777
+
+Match !canonical !originalhost www host bogus localuser rando !user calrissian
+ Port 1234
diff --git a/tests/configs/match-exec b/tests/configs/match-exec
new file mode 100644
index 0000000..62a147a
--- /dev/null
+++ b/tests/configs/match-exec
@@ -0,0 +1,16 @@
+Match exec "quoted"
+ User benjamin
+
+Match exec unquoted
+ User rando
+
+Match exec "quoted spaced"
+ User neil
+
+# Just to prepopulate values for tokenizing subsequent exec
+Host target
+ User intermediate
+ HostName configured
+
+Match exec "%C %d %h %L %l %n %p %r %u"
+ Port 1337
diff --git a/tests/configs/match-exec-canonical b/tests/configs/match-exec-canonical
new file mode 100644
index 0000000..794ee9d
--- /dev/null
+++ b/tests/configs/match-exec-canonical
@@ -0,0 +1,10 @@
+CanonicalDomains paramiko.org
+CanonicalizeHostname always
+
+# This will match in the first, uncanonicalized pass
+Match !canonical exec uncanonicalized
+ User defenseless
+
+# And this will match the second time
+Match canonical exec canonicalized
+ Port 8007
diff --git a/tests/configs/match-exec-negation b/tests/configs/match-exec-negation
new file mode 100644
index 0000000..937c910
--- /dev/null
+++ b/tests/configs/match-exec-negation
@@ -0,0 +1,5 @@
+Match !exec "this succeeds"
+ User nope
+
+Match !exec "this fails"
+ User yup
diff --git a/tests/configs/match-exec-no-arg b/tests/configs/match-exec-no-arg
new file mode 100644
index 0000000..20c16d1
--- /dev/null
+++ b/tests/configs/match-exec-no-arg
@@ -0,0 +1,2 @@
+Match exec
+ User uh-oh
diff --git a/tests/configs/match-final b/tests/configs/match-final
new file mode 100644
index 0000000..21e927f
--- /dev/null
+++ b/tests/configs/match-final
@@ -0,0 +1,14 @@
+Host jump
+ HostName jump.example.org
+ Port 1003
+
+Host finally
+ HostName finally.example.org
+ Port 1001
+
+Host default-port
+ HostName default-port.example.org
+
+Match final host "*.example.org" !host jump.example.org
+ ProxyJump jump
+ Port 1002
diff --git a/tests/configs/match-host b/tests/configs/match-host
new file mode 100644
index 0000000..86cbff5
--- /dev/null
+++ b/tests/configs/match-host
@@ -0,0 +1,2 @@
+Match host target
+ User rand
diff --git a/tests/configs/match-host-canonicalized b/tests/configs/match-host-canonicalized
new file mode 100644
index 0000000..52dadea
--- /dev/null
+++ b/tests/configs/match-host-canonicalized
@@ -0,0 +1,8 @@
+CanonicalizeHostname yes
+CanonicalDomains paramiko.org
+
+Match host www.paramiko.org
+ User rand
+
+Match canonical host docs.paramiko.org
+ User eric
diff --git a/tests/configs/match-host-from-match b/tests/configs/match-host-from-match
new file mode 100644
index 0000000..172ee11
--- /dev/null
+++ b/tests/configs/match-host-from-match
@@ -0,0 +1,5 @@
+Match host original-host
+ HostName substituted-host
+
+Match host substituted-host
+ User inner
diff --git a/tests/configs/match-host-glob b/tests/configs/match-host-glob
new file mode 100644
index 0000000..3d53cf4
--- /dev/null
+++ b/tests/configs/match-host-glob
@@ -0,0 +1,2 @@
+Match host *ever
+ User matrim
diff --git a/tests/configs/match-host-glob-list b/tests/configs/match-host-glob-list
new file mode 100644
index 0000000..3617d13
--- /dev/null
+++ b/tests/configs/match-host-glob-list
@@ -0,0 +1,8 @@
+Match host *ever
+ User matrim
+
+Match host somehost,someotherhost
+ User thom
+
+Match host goo*,!goof
+ User perrin
diff --git a/tests/configs/match-host-name b/tests/configs/match-host-name
new file mode 100644
index 0000000..783d939
--- /dev/null
+++ b/tests/configs/match-host-name
@@ -0,0 +1,4 @@
+HostName default-host
+
+Match host default-host
+ User silly
diff --git a/tests/configs/match-host-negated b/tests/configs/match-host-negated
new file mode 100644
index 0000000..7c5d3f3
--- /dev/null
+++ b/tests/configs/match-host-negated
@@ -0,0 +1,2 @@
+Match !host www
+ User jeff
diff --git a/tests/configs/match-host-no-arg b/tests/configs/match-host-no-arg
new file mode 100644
index 0000000..191cebb
--- /dev/null
+++ b/tests/configs/match-host-no-arg
@@ -0,0 +1,2 @@
+Match host
+ User oops
diff --git a/tests/configs/match-localuser b/tests/configs/match-localuser
new file mode 100644
index 0000000..fe4a276
--- /dev/null
+++ b/tests/configs/match-localuser
@@ -0,0 +1,14 @@
+Match localuser gandalf
+ HostName gondor
+
+Match localuser b*
+ HostName shire
+
+Match localuser aragorn,frodo
+ HostName moria
+
+Match localuser gimli,!legolas
+ Port 7373
+
+Match !localuser sauron
+ HostName mordor
diff --git a/tests/configs/match-localuser-no-arg b/tests/configs/match-localuser-no-arg
new file mode 100644
index 0000000..6623553
--- /dev/null
+++ b/tests/configs/match-localuser-no-arg
@@ -0,0 +1,2 @@
+Match localuser
+ User oops
diff --git a/tests/configs/match-orighost b/tests/configs/match-orighost
new file mode 100644
index 0000000..1054199
--- /dev/null
+++ b/tests/configs/match-orighost
@@ -0,0 +1,16 @@
+HostName bogus
+
+Match originalhost target
+ User tuon
+
+Match originalhost what*
+ User matrim
+
+Match originalhost comma,sep*
+ User chameleon
+
+Match originalhost yep,!nope
+ User skipped
+
+Match !originalhost www !originalhost nope
+ User thom
diff --git a/tests/configs/match-orighost-canonical b/tests/configs/match-orighost-canonical
new file mode 100644
index 0000000..737345e
--- /dev/null
+++ b/tests/configs/match-orighost-canonical
@@ -0,0 +1,5 @@
+CanonicalizeHostname yes
+CanonicalDomains paramiko.org
+
+Match originalhost www
+ User tuon
diff --git a/tests/configs/match-orighost-no-arg b/tests/configs/match-orighost-no-arg
new file mode 100644
index 0000000..427382b
--- /dev/null
+++ b/tests/configs/match-orighost-no-arg
@@ -0,0 +1,2 @@
+Match originalhost
+ User oops
diff --git a/tests/configs/match-user b/tests/configs/match-user
new file mode 100644
index 0000000..14d6ac1
--- /dev/null
+++ b/tests/configs/match-user
@@ -0,0 +1,14 @@
+Match user gandalf
+ HostName gondor
+
+Match user b*
+ HostName shire
+
+Match user aragorn,frodo
+ HostName moria
+
+Match user gimli,!legolas
+ Port 7373
+
+Match !user sauron
+ HostName mordor
diff --git a/tests/configs/match-user-explicit b/tests/configs/match-user-explicit
new file mode 100644
index 0000000..9a2b1d8
--- /dev/null
+++ b/tests/configs/match-user-explicit
@@ -0,0 +1,4 @@
+User explicit
+
+Match user explicit
+ HostName dumb
diff --git a/tests/configs/match-user-no-arg b/tests/configs/match-user-no-arg
new file mode 100644
index 0000000..65a11ab
--- /dev/null
+++ b/tests/configs/match-user-no-arg
@@ -0,0 +1,2 @@
+Match user
+ User oops
diff --git a/tests/configs/multi-canon-domains b/tests/configs/multi-canon-domains
new file mode 100644
index 0000000..5674b44
--- /dev/null
+++ b/tests/configs/multi-canon-domains
@@ -0,0 +1,5 @@
+CanonicalizeHostname yes
+CanonicalDomains not-a-real-tld paramiko.org
+
+Host www.paramiko.org
+ User rando
diff --git a/tests/configs/no-canon b/tests/configs/no-canon
new file mode 100644
index 0000000..033f8c5
--- /dev/null
+++ b/tests/configs/no-canon
@@ -0,0 +1,5 @@
+CanonicalizeHostname no
+CanonicalDomains paramiko.org
+
+Host www.paramiko.org
+ User rando
diff --git a/tests/configs/robey b/tests/configs/robey
new file mode 100644
index 0000000..b202622
--- /dev/null
+++ b/tests/configs/robey
@@ -0,0 +1,17 @@
+# A timeless classic?
+# NOTE: some lines in here have 'extra' whitespace (incl trailing, and mixed
+# tabs/spaces!) on purpose.
+
+Host *
+ User robey
+ IdentityFile =~/.ssh/id_rsa
+
+# comment
+Host *.example.com
+ User bjork
+Port=3333
+Host *
+ Crazy something dumb
+Host spoo.example.com
+Crazy something else
+
diff --git a/tests/configs/zero-maxdots b/tests/configs/zero-maxdots
new file mode 100644
index 0000000..dc00054
--- /dev/null
+++ b/tests/configs/zero-maxdots
@@ -0,0 +1,9 @@
+CanonicalizeHostname yes
+CanonicalDomains paramiko.org
+CanonicalizeMaxDots 0
+
+Host www.paramiko.org
+ User rando
+
+Host sub.www.paramiko.org
+ User deep
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..12b9728
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,170 @@
+import logging
+import os
+import shutil
+import threading
+from pathlib import Path
+
+from invoke.vendor.lexicon import Lexicon
+
+import pytest
+from paramiko import (
+ SFTPServer,
+ SFTP,
+ Transport,
+ DSSKey,
+ RSAKey,
+ Ed25519Key,
+ ECDSAKey,
+ PKey,
+)
+
+from ._loop import LoopSocket
+from ._stub_sftp import StubServer, StubSFTPServer
+from ._util import _support
+
+from icecream import ic, install as install_ic
+
+
+# Better print() for debugging - use ic()!
+install_ic()
+ic.configureOutput(includeContext=True)
+
+
+# Perform logging by default; pytest will capture and thus hide it normally,
+# presenting it on error/failure. (But also allow turning it off when doing
+# very pinpoint debugging - e.g. using breakpoints, so you don't want output
+# hiding enabled, but also don't want all the logging to gum up the terminal.)
+if not os.environ.get("DISABLE_LOGGING", False):
+ logging.basicConfig(
+ level=logging.DEBUG,
+ # Also make sure to set up timestamping for more sanity when debugging.
+ format="[%(relativeCreated)s]\t%(levelname)s:%(name)s:%(message)s",
+ datefmt="%H:%M:%S",
+ )
+
+
+def make_sftp_folder():
+ """
+ Ensure expected target temp folder exists on the remote end.
+
+ Will clean it out if it already exists.
+ """
+ # TODO: go back to using the sftp functionality itself for folder setup so
+ # we can test against live SFTP servers again someday. (Not clear if anyone
+ # is/was using the old capability for such, though...)
+ # TODO: something that would play nicer with concurrent testing (but
+ # probably e.g. using thread ID or UUIDs or something; not the "count up
+ # until you find one not used!" crap from before...)
+ # TODO: if we want to lock ourselves even harder into localhost-only
+ # testing (probably not?) could use tempdir modules for this for improved
+ # safety. Then again...why would someone have such a folder???
+ path = os.environ.get("TEST_FOLDER", "paramiko-test-target")
+ # Forcibly nuke this directory locally, since at the moment, the below
+ # fixtures only ever run with a locally scoped stub test server.
+ shutil.rmtree(path, ignore_errors=True)
+ # Then create it anew, again locally, for the same reason.
+ os.mkdir(path)
+ return path
+
+
+@pytest.fixture # (scope='session')
+def sftp_server():
+ """
+ Set up an in-memory SFTP server thread. Yields the client Transport/socket.
+
+ The resulting client Transport (along with all the server components) will
+ be the same object throughout the test session; the `sftp` fixture then
+ creates new higher level client objects wrapped around the client
+ Transport, as necessary.
+ """
+ # Sockets & transports
+ socks = LoopSocket()
+ sockc = LoopSocket()
+ sockc.link(socks)
+ # TODO: reuse with new server fixture if possible
+ tc = Transport(sockc)
+ ts = Transport(socks)
+ # Auth
+ host_key = RSAKey.from_private_key_file(_support("rsa.key"))
+ ts.add_server_key(host_key)
+ # Server setup
+ event = threading.Event()
+ server = StubServer()
+ ts.set_subsystem_handler("sftp", SFTPServer, StubSFTPServer)
+ ts.start_server(event, server)
+ # Wait (so client has time to connect? Not sure. Old.)
+ event.wait(1.0)
+ # Make & yield connection.
+ tc.connect(username="slowdive", password="pygmalion")
+ yield tc
+ # TODO: any need for shutdown? Why didn't old suite do so? Or was that the
+ # point of the "join all threads from threading module" crap in test.py?
+
+
+@pytest.fixture
+def sftp(sftp_server):
+ """
+ Yield an SFTP client connected to the global in-session SFTP server thread.
+ """
+ # Client setup
+ client = SFTP.from_transport(sftp_server)
+ # Work in 'remote' folder setup (as it wants to use the client)
+ # TODO: what's the cleanest way to make this available to tests? Doing it
+ # this way is marginally less bad than the previous 'global'-using setup,
+ # but not by much?
+ client.FOLDER = make_sftp_folder()
+ # Yield client to caller
+ yield client
+ # Clean up - as in make_sftp_folder, we assume local-only exec for now.
+ shutil.rmtree(client.FOLDER, ignore_errors=True)
+
+
+key_data = [
+ ["ssh-rsa", RSAKey, "SHA256:OhNL391d/beeFnxxg18AwWVYTAHww+D4djEE7Co0Yng"],
+ ["ssh-dss", DSSKey, "SHA256:uHwwykG099f4M4kfzvFpKCTino0/P03DRbAidpAmPm0"],
+ [
+ "ssh-ed25519",
+ Ed25519Key,
+ "SHA256:J6VESFdD3xSChn8y9PzWzeF+1tl892mOy2TqkMLO4ow",
+ ],
+ [
+ "ecdsa-sha2-nistp256",
+ ECDSAKey,
+ "SHA256:BrQG04oNKUETjKCeL4ifkARASg3yxS/pUHl3wWM26Yg",
+ ],
+]
+for datum in key_data:
+ # Add true first member with human-facing short algo name
+ short = datum[0].replace("ssh-", "").replace("sha2-nistp", "")
+ datum.insert(0, short)
+
+
+@pytest.fixture(scope="session", params=key_data, ids=lambda x: x[0])
+def keys(request):
+ """
+ Yield an object for each known type of key, with attributes:
+
+ - ``short_type``: short identifier, eg ``rsa`` or ``ecdsa-256``
+ - ``full_type``: the "message style" key identifier, eg ``ssh-rsa``, or
+ ``ecdsa-sha2-nistp256``.
+ - ``path``: a pathlib Path object to the fixture key file
+ - ``pkey``: PKey object, which may or may not also have a cert loaded
+ - ``expected_fp``: the expected fingerprint of said key
+ """
+ short_type, key_type, key_class, fingerprint = request.param
+ bag = Lexicon()
+ bag.short_type = short_type
+ bag.full_type = key_type
+ bag.path = Path(_support(f"{short_type}.key"))
+ with bag.path.open() as fd:
+ bag.pkey = key_class.from_private_key(fd)
+ # Second copy for things like equality-but-not-identity testing
+ with bag.path.open() as fd:
+ bag.pkey2 = key_class.from_private_key(fd)
+ bag.expected_fp = fingerprint
+ # Also tack on the cert-bearing variant for some tests
+ cert = bag.path.with_suffix(".key-cert.pub")
+ bag.pkey_with_cert = PKey.from_path(cert) if cert.exists() else None
+ # Safety checks
+ assert bag.pkey.fingerprint == fingerprint
+ yield bag
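A sketch of how a test consumes the parametrized, session-scoped `keys` fixture defined above; pytest runs this hypothetical test once per entry in key_data.

def test_fingerprint_matches_expectation(keys):
    # `keys` is the Lexicon bag yielded above, one parametrization per key type
    assert keys.pkey.fingerprint == keys.expected_fp
    # pkey2 is a second load of the same file: equal, but not the same object
    assert keys.pkey == keys.pkey2
    assert keys.pkey is not keys.pkey2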
diff --git a/tests/pkey.py b/tests/pkey.py
new file mode 100644
index 0000000..691fda0
--- /dev/null
+++ b/tests/pkey.py
@@ -0,0 +1,229 @@
+from pathlib import Path
+from unittest.mock import patch, call
+
+from pytest import raises
+
+from cryptography.hazmat.primitives.asymmetric.ed448 import Ed448PrivateKey
+from paramiko import (
+ DSSKey,
+ ECDSAKey,
+ Ed25519Key,
+ Message,
+ PKey,
+ PublicBlob,
+ RSAKey,
+ UnknownKeyType,
+)
+
+from ._util import _support
+
+
+class PKey_:
+ # NOTE: this is incidentally tested by a number of other tests, such as the
+ # agent.py test suite
+ class from_type_string:
+ def loads_from_type_and_bytes(self, keys):
+ obj = PKey.from_type_string(keys.full_type, keys.pkey.asbytes())
+ assert obj == keys.pkey
+
+ # TODO: exceptions
+ #
+ # TODO: passphrase? OTOH since this is aimed at the agent...irrelephant
+
+ class from_path:
+ def loads_from_Path(self, keys):
+ obj = PKey.from_path(keys.path)
+ assert obj == keys.pkey
+
+ def loads_from_str(self):
+ key = PKey.from_path(str(_support("rsa.key")))
+ assert isinstance(key, RSAKey)
+
+ @patch("paramiko.pkey.Path")
+ def expands_user(self, mPath):
+ # real key for guts that want a real key format
+ mykey = Path(_support("rsa.key"))
+ pathy = mPath.return_value.expanduser.return_value
+ # read_bytes for cryptography.io's loaders
+ pathy.read_bytes.return_value = mykey.read_bytes()
+ # open() for our own class loader
+ pathy.open.return_value = mykey.open()
+ # fake out exists() to avoid attempts to load cert
+ pathy.exists.return_value = False
+ PKey.from_path("whatever") # we're not testing expanduser itself
+ # Both key and cert paths
+ mPath.return_value.expanduser.assert_has_calls([call(), call()])
+
+ def raises_UnknownKeyType_for_unknown_types(self):
+ # I.e. a real key class (one that cryptography.io can turn into a
+ # useful object) which we do NOT support. Chose Ed448 randomly, as
+ # OpenSSH doesn't seem to support it either, going by ssh-keygen...
+ keypath = _support("ed448.key")
+ with raises(UnknownKeyType) as exc:
+ PKey.from_path(keypath)
+ assert issubclass(exc.value.key_type, Ed448PrivateKey)
+ with open(keypath, "rb") as fd:
+ assert exc.value.key_bytes == fd.read()
+
+ def leaves_cryptography_exceptions_untouched(self):
+ # a Python file is not a private key!
+ with raises(ValueError):
+ PKey.from_path(__file__)
+
+ # TODO: passphrase support tested
+
+ class automatically_loads_certificates:
+ def existing_cert_loaded_when_given_key_path(self):
+ key = PKey.from_path(_support("rsa.key"))
+ # Public blob exists despite no .load_certificate call
+ assert key.public_blob is not None
+ assert (
+ key.public_blob.key_type == "ssh-rsa-cert-v01@openssh.com"
+ )
+ # And it's definitely the one we expected
+ assert key.public_blob == PublicBlob.from_file(
+ _support("rsa.key-cert.pub")
+ )
+
+ def can_be_given_cert_path_instead(self):
+ key = PKey.from_path(_support("rsa.key-cert.pub"))
+ # It's still a key, not a PublicBlob
+ assert isinstance(key, RSAKey)
+ # Public blob exists despite no .load_certificate call
+ assert key.public_blob is not None
+ assert (
+ key.public_blob.key_type == "ssh-rsa-cert-v01@openssh.com"
+ )
+ # And it's definitely the one we expected
+ assert key.public_blob == PublicBlob.from_file(
+ _support("rsa.key-cert.pub")
+ )
+
+ def no_cert_load_if_no_cert(self):
+ # This key exists (it's a copy of the regular one) but has no
+ # matching -cert.pub
+ key = PKey.from_path(_support("rsa-lonely.key"))
+ assert key.public_blob is None
+
+ def excepts_usefully_if_no_key_only_cert(self):
+ # TODO: is that truly an error condition? the cert is ~the
+ # pubkey and we still require the privkey for signing, yea?
+ # This cert exists (it's a copy of the regular one) but there's
+ # no rsa-missing.key to load.
+ with raises(FileNotFoundError) as info:
+ PKey.from_path(_support("rsa-missing.key-cert.pub"))
+ assert info.value.filename.endswith("rsa-missing.key")
+
+ class load_certificate:
+ def rsa_public_cert_blobs(self):
+ # Data to test signing with (arbitrary)
+ data = b"ice weasels"
+ # Load key w/o cert at first (so avoiding .from_path)
+ key = RSAKey.from_private_key_file(_support("rsa.key"))
+ assert key.public_blob is None
+ # Sign regular-style (using, arbitrarily, SHA2)
+ msg = key.sign_ssh_data(data, "rsa-sha2-256")
+ msg.rewind()
+ assert "rsa-sha2-256" == msg.get_text()
+ signed = msg.get_binary() # for comparison later
+
+ # Load cert and inspect its internals
+ key.load_certificate(_support("rsa.key-cert.pub"))
+ assert key.public_blob is not None
+ assert key.public_blob.key_type == "ssh-rsa-cert-v01@openssh.com"
+ assert key.public_blob.comment == "test_rsa.key.pub"
+ msg = Message(key.public_blob.key_blob)
+ # cert type
+ assert msg.get_text() == "ssh-rsa-cert-v01@openssh.com"
+ # nonce
+ msg.get_string()
+ # public numbers
+ assert msg.get_mpint() == key.public_numbers.e
+ assert msg.get_mpint() == key.public_numbers.n
+ # serial number
+ assert msg.get_int64() == 1234
+ # TODO: whoever wrote the OG tests didn't care about the remaining
+ # fields from
+ # https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.certkeys
+ # so neither do I, for now...
+
+ # Sign cert-style (still SHA256 - so this actually does almost
+ # exactly the same thing under the hood as the previous sign)
+ msg = key.sign_ssh_data(data, "rsa-sha2-256-cert-v01@openssh.com")
+ msg.rewind()
+ assert "rsa-sha2-256" == msg.get_text()
+ assert signed == msg.get_binary() # same signature as above
+ msg.rewind()
+ assert key.verify_ssh_sig(b"ice weasels", msg) # our data verified
+
+ def loading_cert_of_different_type_from_key_raises_ValueError(self):
+ edkey = Ed25519Key.from_private_key_file(_support("ed25519.key"))
+ err = "PublicBlob type ssh-rsa-cert-v01@openssh.com incompatible with key type ssh-ed25519" # noqa
+ with raises(ValueError, match=err):
+ edkey.load_certificate(_support("rsa.key-cert.pub"))
+
+ def fingerprint(self, keys):
+ # NOTE: Hardcoded fingerprint expectation stored in fixture.
+ assert keys.pkey.fingerprint == keys.expected_fp
+
+ def algorithm_name(self, keys):
+ key = keys.pkey
+ if isinstance(key, RSAKey):
+ assert key.algorithm_name == "RSA"
+ elif isinstance(key, DSSKey):
+ assert key.algorithm_name == "DSS"
+ elif isinstance(key, ECDSAKey):
+ assert key.algorithm_name == "ECDSA"
+ elif isinstance(key, Ed25519Key):
+ assert key.algorithm_name == "ED25519"
+ # TODO: corner case: AgentKey, whose .name can be cert-y (due to the
+ # value of the name field passed via agent protocol) and thus
+ # algorithm_name is eg "RSA-CERT" - keys loaded directly from disk will
+ # never look this way, even if they have a .public_blob attached.
+
+ class equality_and_hashing:
+ def same_key_is_equal_to_itself(self, keys):
+ assert keys.pkey == keys.pkey2
+
+ def same_key_same_hash(self, keys):
+ # NOTE: this isn't a great test due to hashseed randomization under
+ # Python 3 preventing use of static values, but it does still prove
+ # that __hash__ is implemented/doesn't explode & works across
+ # instances
+ assert hash(keys.pkey) == hash(keys.pkey2)
+
+ def keys_are_not_equal_to_other_types(self, keys):
+ for value in [None, True, ""]:
+ assert keys.pkey != value
+
+ class identifiers_classmethods:
+ def default_is_class_name_attribute(self):
+ # NOTE: not all classes _have_ this, only the ones that don't
+ # customize identifiers().
+ class MyKey(PKey):
+ name = "it me"
+
+ assert MyKey.identifiers() == ["it me"]
+
+ def rsa_is_all_combos_of_cert_and_sha_type(self):
+ assert RSAKey.identifiers() == [
+ "ssh-rsa",
+ "ssh-rsa-cert-v01@openssh.com",
+ "rsa-sha2-256",
+ "rsa-sha2-256-cert-v01@openssh.com",
+ "rsa-sha2-512",
+ "rsa-sha2-512-cert-v01@openssh.com",
+ ]
+
+ def dss_is_protocol_name(self):
+ assert DSSKey.identifiers() == ["ssh-dss"]
+
+ def ed25519_is_protocol_name(self):
+ assert Ed25519Key.identifiers() == ["ssh-ed25519"]
+
+ def ecdsa_is_all_curve_names(self):
+ assert ECDSAKey.identifiers() == [
+ "ecdsa-sha2-nistp256",
+ "ecdsa-sha2-nistp384",
+ "ecdsa-sha2-nistp521",
+ ]
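A condensed sketch of the loading behaviour the suite above exercises, assuming an OpenSSH-format RSA private key at the (illustrative) path below with an optional rsa.key-cert.pub sibling:

from paramiko import PKey, RSAKey

key = PKey.from_path("tests/rsa.key")    # dispatches to the right key class
assert isinstance(key, RSAKey)
print(key.algorithm_name)                # "RSA"
print(key.fingerprint)                   # "SHA256:..." string
if key.public_blob is not None:          # auto-loaded when rsa.key-cert.pub exists
    print(key.public_blob.key_type)      # "ssh-rsa-cert-v01@openssh.com"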
diff --git a/tests/test_buffered_pipe.py b/tests/test_buffered_pipe.py
new file mode 100644
index 0000000..35e2cde
--- /dev/null
+++ b/tests/test_buffered_pipe.py
@@ -0,0 +1,91 @@
+# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Some unit tests for BufferedPipe.
+"""
+
+import threading
+import time
+import unittest
+
+from paramiko.buffered_pipe import BufferedPipe, PipeTimeout
+from paramiko import pipe
+
+
+def delay_thread(p):
+ p.feed("a")
+ time.sleep(0.5)
+ p.feed("b")
+ p.close()
+
+
+def close_thread(p):
+ time.sleep(0.2)
+ p.close()
+
+
+class BufferedPipeTest(unittest.TestCase):
+ def test_buffered_pipe(self):
+ p = BufferedPipe()
+ self.assertTrue(not p.read_ready())
+ p.feed("hello.")
+ self.assertTrue(p.read_ready())
+ data = p.read(6)
+ self.assertEqual(b"hello.", data)
+
+ p.feed("plus/minus")
+ self.assertEqual(b"plu", p.read(3))
+ self.assertEqual(b"s/m", p.read(3))
+ self.assertEqual(b"inus", p.read(4))
+
+ p.close()
+ self.assertTrue(not p.read_ready())
+ self.assertEqual(b"", p.read(1))
+
+ def test_delay(self):
+ p = BufferedPipe()
+ self.assertTrue(not p.read_ready())
+ threading.Thread(target=delay_thread, args=(p,)).start()
+ self.assertEqual(b"a", p.read(1, 0.1))
+ try:
+ p.read(1, 0.1)
+ self.assertTrue(False)
+ except PipeTimeout:
+ pass
+ self.assertEqual(b"b", p.read(1, 1.0))
+ self.assertEqual(b"", p.read(1))
+
+ def test_close_while_reading(self):
+ p = BufferedPipe()
+ threading.Thread(target=close_thread, args=(p,)).start()
+ data = p.read(1, 1.0)
+ self.assertEqual(b"", data)
+
+ def test_or_pipe(self):
+ p = pipe.make_pipe()
+ p1, p2 = pipe.make_or_pipe(p)
+ self.assertFalse(p._set)
+ p1.set()
+ self.assertTrue(p._set)
+ p2.set()
+ self.assertTrue(p._set)
+ p1.clear()
+ self.assertTrue(p._set)
+ p2.clear()
+ self.assertFalse(p._set)
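A standalone sketch of the feed/read semantics these tests rely on: reads return bytes, block up to an optional timeout, and yield empty bytes once the pipe is closed and drained.

from paramiko.buffered_pipe import BufferedPipe, PipeTimeout

p = BufferedPipe()
p.feed("hello")                # str or bytes are both accepted
assert p.read(5) == b"hello"
try:
    p.read(1, 0.1)             # nothing buffered -> times out
except PipeTimeout:
    pass
p.close()
assert p.read(1) == b""        # closed and empty -> EOF-style empty bytes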
diff --git a/tests/test_channelfile.py b/tests/test_channelfile.py
new file mode 100644
index 0000000..e2b6306
--- /dev/null
+++ b/tests/test_channelfile.py
@@ -0,0 +1,60 @@
+from unittest.mock import patch, MagicMock
+
+from paramiko import Channel, ChannelFile, ChannelStderrFile, ChannelStdinFile
+
+
+class ChannelFileBase:
+ @patch("paramiko.channel.ChannelFile._set_mode")
+ def test_defaults_to_unbuffered_reading(self, setmode):
+ self.klass(Channel(None))
+ setmode.assert_called_once_with("r", -1)
+
+ @patch("paramiko.channel.ChannelFile._set_mode")
+ def test_can_override_mode_and_bufsize(self, setmode):
+ self.klass(Channel(None), mode="w", bufsize=25)
+ setmode.assert_called_once_with("w", 25)
+
+ def test_read_recvs_from_channel(self):
+ chan = MagicMock()
+ cf = self.klass(chan)
+ cf.read(100)
+ chan.recv.assert_called_once_with(100)
+
+ def test_write_calls_channel_sendall(self):
+ chan = MagicMock()
+ cf = self.klass(chan, mode="w")
+ cf.write("ohai")
+ chan.sendall.assert_called_once_with(b"ohai")
+
+
+class TestChannelFile(ChannelFileBase):
+ klass = ChannelFile
+
+
+class TestChannelStderrFile:
+ def test_read_calls_channel_recv_stderr(self):
+ chan = MagicMock()
+ cf = ChannelStderrFile(chan)
+ cf.read(100)
+ chan.recv_stderr.assert_called_once_with(100)
+
+ def test_write_calls_channel_sendall(self):
+ chan = MagicMock()
+ cf = ChannelStderrFile(chan, mode="w")
+ cf.write("ohai")
+ chan.sendall_stderr.assert_called_once_with(b"ohai")
+
+
+class TestChannelStdinFile(ChannelFileBase):
+ klass = ChannelStdinFile
+
+ def test_close_calls_channel_shutdown_write(self):
+ chan = MagicMock()
+ cf = ChannelStdinFile(chan, mode="wb")
+ cf.flush = MagicMock()
+ cf.close()
+ # Sanity check that we still call BufferedFile.close()
+ cf.flush.assert_called_once_with()
+ assert cf._closed is True
+ # Actual point of test
+ chan.shutdown_write.assert_called_once_with()
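The same proxying behaviour shown standalone with mocks (nothing here touches a real SSH connection): reads delegate to Channel.recv, writes are encoded and handed to Channel.sendall.

from unittest.mock import MagicMock
from paramiko import ChannelFile

# Reading pulls from the wrapped channel's recv()
chan = MagicMock()
chan.recv.return_value = b"output"
assert ChannelFile(chan).read(6) == b"output"

# Writing encodes text and pushes it through sendall()
chan = MagicMock()
ChannelFile(chan, mode="w").write("ohai")
chan.sendall.assert_called_once_with(b"ohai")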
diff --git a/tests/test_client.py b/tests/test_client.py
new file mode 100644
index 0000000..1c0c6c8
--- /dev/null
+++ b/tests/test_client.py
@@ -0,0 +1,837 @@
+# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Some unit tests for SSHClient.
+"""
+
+
+import gc
+import os
+import platform
+import socket
+import threading
+import time
+import unittest
+import warnings
+import weakref
+from tempfile import mkstemp
+
+import pytest
+from pytest_relaxed import raises
+from unittest.mock import patch, Mock
+
+import paramiko
+from paramiko import SSHClient
+from paramiko.pkey import PublicBlob
+from paramiko.ssh_exception import SSHException, AuthenticationException
+
+from ._util import _support, requires_sha1_signing, slow
+
+
+requires_gss_auth = unittest.skipUnless(
+ paramiko.GSS_AUTH_AVAILABLE, "GSS auth not available"
+)
+
+FINGERPRINTS = {
+ "ssh-dss": b"\x44\x78\xf0\xb9\xa2\x3c\xc5\x18\x20\x09\xff\x75\x5b\xc1\xd2\x6c", # noqa
+ "ssh-rsa": b"\x60\x73\x38\x44\xcb\x51\x86\x65\x7f\xde\xda\xa2\x2b\x5a\x57\xd5", # noqa
+ "ecdsa-sha2-nistp256": b"\x25\x19\xeb\x55\xe6\xa1\x47\xff\x4f\x38\xd2\x75\x6f\xa5\xd5\x60", # noqa
+ "ssh-ed25519": b'\xb3\xd5"\xaa\xf9u^\xe8\xcd\x0e\xea\x02\xb9)\xa2\x80',
+}
+
+
+class NullServer(paramiko.ServerInterface):
+ def __init__(self, *args, **kwargs):
+ # Allow tests to enable/disable specific key types
+ self.__allowed_keys = kwargs.pop("allowed_keys", [])
+ # And allow them to set a (single...meh) expected public blob (cert)
+ self.__expected_public_blob = kwargs.pop("public_blob", None)
+ super().__init__(*args, **kwargs)
+
+ def get_allowed_auths(self, username):
+ if username == "slowdive":
+ return "publickey,password"
+ return "publickey"
+
+ def check_auth_password(self, username, password):
+ if (username == "slowdive") and (password == "pygmalion"):
+ return paramiko.AUTH_SUCCESSFUL
+ if (username == "slowdive") and (password == "unresponsive-server"):
+ time.sleep(5)
+ return paramiko.AUTH_SUCCESSFUL
+ return paramiko.AUTH_FAILED
+
+ def check_auth_publickey(self, username, key):
+ try:
+ expected = FINGERPRINTS[key.get_name()]
+ except KeyError:
+ return paramiko.AUTH_FAILED
+ # Base check: allowed auth type & fingerprint matches
+ happy = (
+ key.get_name() in self.__allowed_keys
+ and key.get_fingerprint() == expected
+ )
+ # Secondary check: if test wants assertions about cert data
+ if (
+ self.__expected_public_blob is not None
+ and key.public_blob != self.__expected_public_blob
+ ):
+ happy = False
+ return paramiko.AUTH_SUCCESSFUL if happy else paramiko.AUTH_FAILED
+
+ def check_channel_request(self, kind, chanid):
+ return paramiko.OPEN_SUCCEEDED
+
+ def check_channel_exec_request(self, channel, command):
+ if command != b"yes":
+ return False
+ return True
+
+ def check_channel_env_request(self, channel, name, value):
+ if name == "INVALID_ENV":
+ return False
+
+ if not hasattr(channel, "env"):
+ setattr(channel, "env", {})
+
+ channel.env[name] = value
+ return True
+
+
+class ClientTest(unittest.TestCase):
+ def setUp(self):
+ self.sockl = socket.socket()
+ self.sockl.bind(("localhost", 0))
+ self.sockl.listen(1)
+ self.addr, self.port = self.sockl.getsockname()
+ self.connect_kwargs = dict(
+ hostname=self.addr,
+ port=self.port,
+ username="slowdive",
+ look_for_keys=False,
+ )
+ self.event = threading.Event()
+ self.kill_event = threading.Event()
+
+ def tearDown(self):
+ # Shut down client Transport
+ if hasattr(self, "tc"):
+ self.tc.close()
+ # Shut down shared socket
+ if hasattr(self, "sockl"):
+ # Signal to server thread that it should shut down early; it checks
+ # this immediately after accept(). (In scenarios where connection
+ # actually succeeded during the test, this becomes a no-op.)
+ self.kill_event.set()
+ # Forcibly connect to server sock in case the server thread is
+ # hanging out in its accept() (e.g. if the client side of the test
+ # fails before it even gets to connecting); there's no other good
+ # way to force an accept() to exit.
+ put_a_sock_in_it = socket.socket()
+ put_a_sock_in_it.connect((self.addr, self.port))
+ put_a_sock_in_it.close()
+ # Then close "our" end of the socket (which _should_ cause the
+ # accept() to bail out, but does not, for some reason. I blame
+ # threading.)
+ self.sockl.close()
+
+ def _run(
+ self,
+ allowed_keys=None,
+ delay=0,
+ public_blob=None,
+ kill_event=None,
+ server_name=None,
+ ):
+ if allowed_keys is None:
+ allowed_keys = FINGERPRINTS.keys()
+ self.socks, addr = self.sockl.accept()
+ # If the kill event was set at this point, it indicates an early
+ # shutdown, so bail out now and don't even try setting up a Transport
+ # (which will just verbosely die.)
+ if kill_event and kill_event.is_set():
+ self.socks.close()
+ return
+ self.ts = paramiko.Transport(self.socks)
+ if server_name is not None:
+ self.ts.local_version = server_name
+ keypath = _support("rsa.key")
+ host_key = paramiko.RSAKey.from_private_key_file(keypath)
+ self.ts.add_server_key(host_key)
+ keypath = _support("ecdsa-256.key")
+ host_key = paramiko.ECDSAKey.from_private_key_file(keypath)
+ self.ts.add_server_key(host_key)
+ server = NullServer(allowed_keys=allowed_keys, public_blob=public_blob)
+ if delay:
+ time.sleep(delay)
+ self.ts.start_server(self.event, server)
+
+ def _test_connection(self, **kwargs):
+ """
+ (Most) kwargs get passed directly into SSHClient.connect().
+
+ The exceptions are ``allowed_keys``/``public_blob``/``server_name``
+ which are stripped and handed to the ``NullServer`` used for testing.
+ """
+ run_kwargs = {"kill_event": self.kill_event}
+ for key in ("allowed_keys", "public_blob", "server_name"):
+ run_kwargs[key] = kwargs.pop(key, None)
+ # Server setup
+ threading.Thread(target=self._run, kwargs=run_kwargs).start()
+ host_key = paramiko.RSAKey.from_private_key_file(_support("rsa.key"))
+ public_host_key = paramiko.RSAKey(data=host_key.asbytes())
+
+ # Client setup
+ self.tc = SSHClient()
+ self.tc.get_host_keys().add(
+ f"[{self.addr}]:{self.port}", "ssh-rsa", public_host_key
+ )
+
+ # Actual connection
+ self.tc.connect(**dict(self.connect_kwargs, **kwargs))
+
+ # Authentication successful?
+ self.event.wait(1.0)
+ self.assertTrue(self.event.is_set())
+ self.assertTrue(self.ts.is_active())
+ self.assertEqual(
+ self.connect_kwargs["username"], self.ts.get_username()
+ )
+ self.assertEqual(True, self.ts.is_authenticated())
+ self.assertEqual(False, self.tc.get_transport().gss_kex_used)
+
+ # Command execution functions?
+ stdin, stdout, stderr = self.tc.exec_command("yes")
+ schan = self.ts.accept(1.0)
+
+ # Nobody else tests the API of exec_command so let's do it here for
+ # now. :weary:
+ assert isinstance(stdin, paramiko.ChannelStdinFile)
+ assert isinstance(stdout, paramiko.ChannelFile)
+ assert isinstance(stderr, paramiko.ChannelStderrFile)
+
+ schan.send("Hello there.\n")
+ schan.send_stderr("This is on stderr.\n")
+ schan.close()
+
+ self.assertEqual("Hello there.\n", stdout.readline())
+ self.assertEqual("", stdout.readline())
+ self.assertEqual("This is on stderr.\n", stderr.readline())
+ self.assertEqual("", stderr.readline())
+
+ # Cleanup
+ stdin.close()
+ stdout.close()
+ stderr.close()
+
+
+class SSHClientTest(ClientTest):
+ @requires_sha1_signing
+ def test_client(self):
+ """
+ verify that the SSHClient stuff works too.
+ """
+ self._test_connection(password="pygmalion")
+
+ @requires_sha1_signing
+ def test_client_dsa(self):
+ """
+ verify that SSHClient works with a DSA key.
+ """
+ self._test_connection(key_filename=_support("dss.key"))
+
+ @requires_sha1_signing
+ def test_client_rsa(self):
+ """
+ verify that SSHClient works with an RSA key.
+ """
+ self._test_connection(key_filename=_support("rsa.key"))
+
+ @requires_sha1_signing
+ def test_client_ecdsa(self):
+ """
+ verify that SSHClient works with an ECDSA key.
+ """
+ self._test_connection(key_filename=_support("ecdsa-256.key"))
+
+ @requires_sha1_signing
+ def test_client_ed25519(self):
+ self._test_connection(key_filename=_support("ed25519.key"))
+
+ @requires_sha1_signing
+ def test_multiple_key_files(self):
+ """
+ verify that SSHClient accepts and tries multiple key files.
+ """
+ # This is dumb :(
+ types_ = {
+ "rsa": "ssh-rsa",
+ "dss": "ssh-dss",
+ "ecdsa": "ecdsa-sha2-nistp256",
+ }
+ # Various combos of attempted & valid keys
+ # TODO: try every possible combo using itertools functions
+ # TODO: use new key(s) fixture(s)
+ for attempt, accept in (
+ (["rsa", "dss"], ["dss"]), # Original test #3
+ (["dss", "rsa"], ["dss"]), # Ordering matters sometimes, sadly
+ (["dss", "rsa", "ecdsa-256"], ["dss"]), # Try ECDSA but fail
+ (["rsa", "ecdsa-256"], ["ecdsa"]), # ECDSA success
+ ):
+ try:
+ self._test_connection(
+ key_filename=[
+ _support("{}.key".format(x)) for x in attempt
+ ],
+ allowed_keys=[types_[x] for x in accept],
+ )
+ finally:
+ # Clean up to avoid occasional gc-related deadlocks.
+ # TODO: use nose test generators after nose port
+ self.tearDown()
+ self.setUp()
+
+ @requires_sha1_signing
+ def test_multiple_key_files_failure(self):
+ """
+ Expect failure when multiple keys in play and none are accepted
+ """
+ # Until #387 is fixed we have to catch a high-up exception since
+ # various platforms trigger different errors here >_<
+ self.assertRaises(
+ SSHException,
+ self._test_connection,
+ key_filename=[_support("rsa.key")],
+ allowed_keys=["ecdsa-sha2-nistp256"],
+ )
+
+ @requires_sha1_signing
+ def test_certs_allowed_as_key_filename_values(self):
+ # NOTE: giving cert path here, not key path. (Key path test is below.
+ # They're similar except for which path is given; the expected auth and
+ # server-side behavior is 100% identical.)
+ # NOTE: only bothered whipping up one cert per overall class/family.
+ for type_ in ("rsa", "dss", "ecdsa-256", "ed25519"):
+ cert_path = _support(f"{type_}.key-cert.pub")
+ self._test_connection(
+ key_filename=cert_path,
+ public_blob=PublicBlob.from_file(cert_path),
+ )
+
+ @requires_sha1_signing
+ def test_certs_implicitly_loaded_alongside_key_filename_keys(self):
+ # NOTE: a regular test_connection() w/ rsa.key would incidentally
+ # test this (because test_xxx.key-cert.pub exists) but incidental tests
+ # stink, so NullServer and friends were updated to allow assertions
+ # about the server-side key object's public blob. Thus, we can prove
+ # that a specific cert was found, along with regular authorization
+ # succeeding proving that the overall flow works.
+ for type_ in ("rsa", "dss", "ecdsa-256", "ed25519"):
+ key_path = _support(f"{type_}.key")
+ self._test_connection(
+ key_filename=key_path,
+ public_blob=PublicBlob.from_file(f"{key_path}-cert.pub"),
+ )
+
+ def _cert_algo_test(self, ver, alg):
+ # Issue #2017; see auth_handler.py
+ self.connect_kwargs["username"] = "somecertuser" # neuter pw auth
+ self._test_connection(
+ # NOTE: SSHClient is able to take either the key or the cert & will
+ # set up its internals as needed
+ key_filename=_support("rsa.key-cert.pub"),
+ server_name="SSH-2.0-OpenSSH_{}".format(ver),
+ )
+ assert (
+ self.tc._transport._agreed_pubkey_algorithm
+ == "{}-cert-v01@openssh.com".format(alg)
+ )
+
+ @requires_sha1_signing
+ def test_old_openssh_needs_ssh_rsa_for_certs_not_rsa_sha2(self):
+ self._cert_algo_test(ver="7.7", alg="ssh-rsa")
+
+ @requires_sha1_signing
+ def test_newer_openssh_uses_rsa_sha2_for_certs_not_ssh_rsa(self):
+ # NOTE: 512 happens to be first in our list and is thus chosen
+ self._cert_algo_test(ver="7.8", alg="rsa-sha2-512")
+
+ def test_default_key_locations_trigger_cert_loads_if_found(self):
+ # TODO: what it says on the tin: ~/.ssh/id_rsa tries to load
+ # ~/.ssh/id_rsa-cert.pub. Right now no other tests actually test that
+ # code path (!) so we're punting too, sob.
+ pass
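+ # A hypothetical sketch of what this could assert (the names below are
+ # assumptions, not upstream guarantees): patch os.path.expanduser so that
+ # "~/.ssh/id_rsa" resolves to a temp copy of the rsa.key fixture plus its
+ # sibling "-cert.pub", connect with look_for_keys=True, and verify the
+ # server-observed key's public_blob matches the cert, mirroring the
+ # explicit key_filename cert tests above.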
+
+ def test_auto_add_policy(self):
+ """
+ verify that SSHClient's AutoAddPolicy works.
+ """
+ threading.Thread(target=self._run).start()
+ hostname = f"[{self.addr}]:{self.port}"
+ key_file = _support("ecdsa-256.key")
+ public_host_key = paramiko.ECDSAKey.from_private_key_file(key_file)
+
+ self.tc = SSHClient()
+ self.tc.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self.assertEqual(0, len(self.tc.get_host_keys()))
+ self.tc.connect(password="pygmalion", **self.connect_kwargs)
+
+ self.event.wait(1.0)
+ self.assertTrue(self.event.is_set())
+ self.assertTrue(self.ts.is_active())
+ self.assertEqual("slowdive", self.ts.get_username())
+ self.assertEqual(True, self.ts.is_authenticated())
+ self.assertEqual(1, len(self.tc.get_host_keys()))
+ new_host_key = list(self.tc.get_host_keys()[hostname].values())[0]
+ self.assertEqual(public_host_key, new_host_key)
+
+ def test_save_host_keys(self):
+ """
+ verify that SSHClient correctly saves a known_hosts file.
+ """
+ warnings.filterwarnings("ignore", "tempnam.*")
+
+ host_key = paramiko.RSAKey.from_private_key_file(_support("rsa.key"))
+ public_host_key = paramiko.RSAKey(data=host_key.asbytes())
+ fd, localname = mkstemp()
+ os.close(fd)
+
+ client = SSHClient()
+ assert len(client.get_host_keys()) == 0
+
+ host_id = f"[{self.addr}]:{self.port}"
+
+ client.get_host_keys().add(host_id, "ssh-rsa", public_host_key)
+ assert len(client.get_host_keys()) == 1
+ assert public_host_key == client.get_host_keys()[host_id]["ssh-rsa"]
+
+ client.save_host_keys(localname)
+
+ with open(localname) as fd:
+ assert host_id in fd.read()
+
+ os.unlink(localname)
+
+ def test_cleanup(self):
+ """
+ verify that when an SSHClient is collected, its transport (and the
+ transport's packetizer) is closed.
+ """
+ # Skipped on PyPy because it fails on CI for unknown reasons
+ if platform.python_implementation() == "PyPy":
+ return
+
+ threading.Thread(target=self._run).start()
+
+ self.tc = SSHClient()
+ self.tc.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ assert len(self.tc.get_host_keys()) == 0
+ self.tc.connect(**dict(self.connect_kwargs, password="pygmalion"))
+
+ self.event.wait(1.0)
+ assert self.event.is_set()
+ assert self.ts.is_active()
+
+ p = weakref.ref(self.tc._transport.packetizer)
+ assert p() is not None
+ self.tc.close()
+ del self.tc
+
+ # force a collection to see whether the SSHClient object is deallocated
+ # 2 GCs are needed on PyPy, time is needed for Python 3
+ # TODO 4.0: this still fails randomly under CircleCI under Python 3.7,
+ # 3.8 at the very least. bumped sleep 0.3->1.0s but the underlying
+ # functionality should get reevaluated now we've dropped Python 2.
+ time.sleep(1)
+ gc.collect()
+ gc.collect()
+
+ assert p() is None
+
+ @patch("paramiko.client.socket.socket")
+ @patch("paramiko.client.socket.getaddrinfo")
+ def test_closes_socket_on_socket_errors(self, getaddrinfo, mocket):
+ getaddrinfo.return_value = (
+ ("irrelevant", None, None, None, "whatever"),
+ )
+
+ class SocksToBeYou(socket.error):
+ pass
+
+ my_socket = mocket.return_value
+ my_socket.connect.side_effect = SocksToBeYou
+ client = SSHClient()
+ with pytest.raises(SocksToBeYou):
+ client.connect(hostname="nope")
+ my_socket.close.assert_called_once_with()
+
+ def test_client_can_be_used_as_context_manager(self):
+ """
+ verify that an SSHClient can be used as a context manager
+ """
+ threading.Thread(target=self._run).start()
+
+ with SSHClient() as tc:
+ self.tc = tc
+ self.tc.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ assert len(self.tc.get_host_keys()) == 0
+ self.tc.connect(**dict(self.connect_kwargs, password="pygmalion"))
+
+ self.event.wait(1.0)
+ self.assertTrue(self.event.is_set())
+ self.assertTrue(self.ts.is_active())
+
+ self.assertTrue(self.tc._transport is not None)
+
+ self.assertTrue(self.tc._transport is None)
+
+ def test_banner_timeout(self):
+ """
+ verify that the SSHClient has a configurable banner timeout.
+ """
+ # Start the thread with a 1 second wait.
+ threading.Thread(target=self._run, kwargs={"delay": 1}).start()
+ host_key = paramiko.RSAKey.from_private_key_file(_support("rsa.key"))
+ public_host_key = paramiko.RSAKey(data=host_key.asbytes())
+
+ self.tc = SSHClient()
+ self.tc.get_host_keys().add(
+ f"[{self.addr}]:{self.port}", "ssh-rsa", public_host_key
+ )
+ # Connect with a half second banner timeout.
+ kwargs = dict(self.connect_kwargs, banner_timeout=0.5)
+ self.assertRaises(paramiko.SSHException, self.tc.connect, **kwargs)
+
+ @requires_sha1_signing
+ def test_auth_trickledown(self):
+ """
+ Failed key auth doesn't prevent subsequent pw auth from succeeding
+ """
+ # NOTE: re #387, re #394
+ # If pkey module used within Client._auth isn't correctly handling auth
+ # errors (e.g. if it allows things like ValueError to bubble up as per
+ # midway through #394) client.connect() will fail (at key load step)
+ # instead of succeeding (at password step)
+ kwargs = dict(
+ # Password-protected key whose passphrase is not 'pygmalion' (it's
+ # 'television' as per tests/test_pkey.py). NOTE: must use
+ # key_filename, loading the actual key here with PKey will except
+ # immediately; we're testing the try/except crap within Client.
+ key_filename=[_support("test_rsa_password.key")],
+ # Actual password for default 'slowdive' user
+ password="pygmalion",
+ )
+ self._test_connection(**kwargs)
+
+ @requires_sha1_signing
+ @slow
+ def test_auth_timeout(self):
+ """
+ verify that the SSHClient has a configurable auth timeout
+ """
+ # Connect with a half second auth timeout
+ self.assertRaises(
+ AuthenticationException,
+ self._test_connection,
+ password="unresponsive-server",
+ auth_timeout=0.5,
+ )
+
+ @patch.object(
+ paramiko.Channel,
+ "_set_remote_channel",
+ lambda *args, **kwargs: time.sleep(100),
+ )
+ def test_channel_timeout(self):
+ """
+ verify that the SSHClient has a configurable channel timeout
+ """
+ threading.Thread(target=self._run).start()
+ # Client setup
+ self.tc = SSHClient()
+ self.tc.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+ # Actual connection
+ self.tc.connect(
+ **dict(
+ self.connect_kwargs, password="pygmalion", channel_timeout=0.5
+ )
+ )
+ self.event.wait(1.0)
+
+ self.assertRaises(paramiko.SSHException, self.tc.open_sftp)
+
+ @requires_gss_auth
+ def test_auth_trickledown_gsskex(self):
+ """
+ Failed gssapi-keyex doesn't prevent subsequent key from succeeding
+ """
+ kwargs = dict(gss_kex=True, key_filename=[_support("rsa.key")])
+ self._test_connection(**kwargs)
+
+ @requires_gss_auth
+ def test_auth_trickledown_gssauth(self):
+ """
+ Failed gssapi-with-mic doesn't prevent subsequent key from succeeding
+ """
+ kwargs = dict(gss_auth=True, key_filename=[_support("rsa.key")])
+ self._test_connection(**kwargs)
+
+ def test_reject_policy(self):
+ """
+ verify that SSHClient's RejectPolicy works.
+ """
+ threading.Thread(target=self._run).start()
+
+ self.tc = SSHClient()
+ self.tc.set_missing_host_key_policy(paramiko.RejectPolicy())
+ self.assertEqual(0, len(self.tc.get_host_keys()))
+ self.assertRaises(
+ paramiko.SSHException,
+ self.tc.connect,
+ password="pygmalion",
+ **self.connect_kwargs,
+ )
+
+ @requires_gss_auth
+ def test_reject_policy_gsskex(self):
+ """
+ verify that SSHClient's RejectPolicy works,
+ even if gssapi-keyex was enabled but not used.
+ """
+ # Test for a bug present in paramiko versions released before
+ # 2017-08-01
+ threading.Thread(target=self._run).start()
+
+ self.tc = SSHClient()
+ self.tc.set_missing_host_key_policy(paramiko.RejectPolicy())
+ self.assertEqual(0, len(self.tc.get_host_keys()))
+ self.assertRaises(
+ paramiko.SSHException,
+ self.tc.connect,
+ password="pygmalion",
+ gss_kex=True,
+ **self.connect_kwargs,
+ )
+
+ def _client_host_key_bad(self, host_key):
+ threading.Thread(target=self._run).start()
+ hostname = f"[{self.addr}]:{self.port}"
+
+ self.tc = SSHClient()
+ self.tc.set_missing_host_key_policy(paramiko.WarningPolicy())
+ known_hosts = self.tc.get_host_keys()
+ known_hosts.add(hostname, host_key.get_name(), host_key)
+
+ self.assertRaises(
+ paramiko.BadHostKeyException,
+ self.tc.connect,
+ password="pygmalion",
+ **self.connect_kwargs,
+ )
+
+ def _client_host_key_good(self, ktype, kfile):
+ threading.Thread(target=self._run).start()
+ hostname = f"[{self.addr}]:{self.port}"
+
+ self.tc = SSHClient()
+ self.tc.set_missing_host_key_policy(paramiko.RejectPolicy())
+ host_key = ktype.from_private_key_file(_support(kfile))
+ known_hosts = self.tc.get_host_keys()
+ known_hosts.add(hostname, host_key.get_name(), host_key)
+
+ self.tc.connect(password="pygmalion", **self.connect_kwargs)
+ self.event.wait(1.0)
+ self.assertTrue(self.event.is_set())
+ self.assertTrue(self.ts.is_active())
+ self.assertEqual(True, self.ts.is_authenticated())
+
+ def test_host_key_negotiation_1(self):
+ host_key = paramiko.ECDSAKey.generate()
+ self._client_host_key_bad(host_key)
+
+ @requires_sha1_signing
+ def test_host_key_negotiation_2(self):
+ host_key = paramiko.RSAKey.generate(2048)
+ self._client_host_key_bad(host_key)
+
+ def test_host_key_negotiation_3(self):
+ self._client_host_key_good(paramiko.ECDSAKey, "ecdsa-256.key")
+
+ @requires_sha1_signing
+ def test_host_key_negotiation_4(self):
+ self._client_host_key_good(paramiko.RSAKey, "rsa.key")
+
+ def _setup_for_env(self):
+ threading.Thread(target=self._run).start()
+
+ self.tc = SSHClient()
+ self.tc.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self.assertEqual(0, len(self.tc.get_host_keys()))
+ self.tc.connect(
+ self.addr, self.port, username="slowdive", password="pygmalion"
+ )
+
+ self.event.wait(1.0)
+ self.assertTrue(self.event.is_set())
+ self.assertTrue(self.ts.is_active())
+
+ def test_update_environment(self):
+ """
+ Verify that environment variables can be set by the client.
+ """
+ self._setup_for_env()
+ target_env = {b"A": b"B", b"C": b"d"}
+
+ self.tc.exec_command("yes", environment=target_env)
+ schan = self.ts.accept(1.0)
+ self.assertEqual(target_env, getattr(schan, "env", {}))
+ schan.close()
+
+ @unittest.skip("Clients normally fail silently, thus so do we, for now")
+ def test_env_update_failures(self):
+ self._setup_for_env()
+ with self.assertRaises(SSHException) as manager:
+ # Verify that a rejection by the server can be detected
+ self.tc.exec_command("yes", environment={b"INVALID_ENV": b""})
+ self.assertTrue(
+ "INVALID_ENV" in str(manager.exception),
+ "Expected variable name in error message",
+ )
+ self.assertTrue(
+ isinstance(manager.exception.args[1], SSHException),
+ "Expected original SSHException in exception",
+ )
+
+ def test_missing_key_policy_accepts_classes_or_instances(self):
+ """
+ Client.missing_host_key_policy() can take classes or instances.
+ """
+ # AN ACTUAL UNIT TEST?! GOOD LORD
+ # (But then we have to test a private API...meh.)
+ client = SSHClient()
+ # Default
+ assert isinstance(client._policy, paramiko.RejectPolicy)
+ # Hand in an instance (classic behavior)
+ client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ assert isinstance(client._policy, paramiko.AutoAddPolicy)
+ # Hand in just the class (new behavior)
+ client.set_missing_host_key_policy(paramiko.AutoAddPolicy)
+ assert isinstance(client._policy, paramiko.AutoAddPolicy)
+
+ @patch("paramiko.client.Transport")
+ def test_disabled_algorithms_defaults_to_None(self, Transport):
+ SSHClient().connect("host", sock=Mock(), password="no")
+ assert Transport.call_args[1]["disabled_algorithms"] is None
+
+ @patch("paramiko.client.Transport")
+ def test_disabled_algorithms_passed_directly_if_given(self, Transport):
+ SSHClient().connect(
+ "host",
+ sock=Mock(),
+ password="no",
+ disabled_algorithms={"keys": ["ssh-dss"]},
+ )
+ call_arg = Transport.call_args[1]["disabled_algorithms"]
+ assert call_arg == {"keys": ["ssh-dss"]}
+
+ @patch("paramiko.client.Transport")
+ def test_transport_factory_defaults_to_Transport(self, Transport):
+ sock, kex, creds, algos = Mock(), Mock(), Mock(), Mock()
+ SSHClient().connect(
+ "host",
+ sock=sock,
+ password="no",
+ gss_kex=kex,
+ gss_deleg_creds=creds,
+ disabled_algorithms=algos,
+ )
+ Transport.assert_called_once_with(
+ sock, gss_kex=kex, gss_deleg_creds=creds, disabled_algorithms=algos
+ )
+
+ @patch("paramiko.client.Transport")
+ def test_transport_factory_may_be_specified(self, Transport):
+ factory = Mock()
+ sock, kex, creds, algos = Mock(), Mock(), Mock(), Mock()
+ SSHClient().connect(
+ "host",
+ sock=sock,
+ password="no",
+ gss_kex=kex,
+ gss_deleg_creds=creds,
+ disabled_algorithms=algos,
+ transport_factory=factory,
+ )
+ factory.assert_called_once_with(
+ sock, gss_kex=kex, gss_deleg_creds=creds, disabled_algorithms=algos
+ )
+ # Safety check
+ assert not Transport.called
+
+
+class PasswordPassphraseTests(ClientTest):
+ # TODO: most of these could reasonably be set up to use mocks/assertions
+ # (e.g. "gave passphrase -> expect PKey was given it as the passphrase")
+ # instead of suffering a real connection cycle.
+ # TODO: in that case, move the below to be part of an integration suite?
+
+ @requires_sha1_signing
+ def test_password_kwarg_works_for_password_auth(self):
+ # Straightforward / duplicate of earlier basic password test.
+ self._test_connection(password="pygmalion")
+
+ # TODO: more granular exception pending #387; should be signaling "no auth
+ # methods available" because no key and no password
+ @raises(SSHException)
+ @requires_sha1_signing
+ def test_passphrase_kwarg_not_used_for_password_auth(self):
+ # Using the "right" password in the "wrong" field shouldn't work.
+ self._test_connection(passphrase="pygmalion")
+
+ @requires_sha1_signing
+ def test_passphrase_kwarg_used_for_key_passphrase(self):
+ # Straightforward again, with new passphrase kwarg.
+ self._test_connection(
+ key_filename=_support("test_rsa_password.key"),
+ passphrase="television",
+ )
+
+ @requires_sha1_signing
+ def test_password_kwarg_used_for_passphrase_when_no_passphrase_kwarg_given(
+ self,
+ ): # noqa
+ # Backwards compatibility: passphrase in the password field.
+ self._test_connection(
+ key_filename=_support("test_rsa_password.key"),
+ password="television",
+ )
+
+ @raises(AuthenticationException) # TODO: more granular
+ @requires_sha1_signing
+ def test_password_kwarg_not_used_for_passphrase_when_passphrase_kwarg_given( # noqa
+ self,
+ ):
+ # Sanity: if we're given both fields, the password field is NOT used as
+ # a passphrase.
+ self._test_connection(
+ key_filename=_support("test_rsa_password.key"),
+ password="television",
+ passphrase="wat? lol no",
+ )
diff --git a/tests/test_config.py b/tests/test_config.py
new file mode 100644
index 0000000..2e49aa3
--- /dev/null
+++ b/tests/test_config.py
@@ -0,0 +1,1048 @@
+# This file is part of Paramiko and subject to the license in /LICENSE in this
+# repository
+
+from os.path import expanduser
+from socket import gaierror
+
+try:
+ from invoke import Result
+except ImportError:
+ Result = None
+
+from unittest.mock import patch
+from pytest import raises, mark, fixture
+
+from paramiko import (
+ SSHConfig,
+ SSHConfigDict,
+ CouldNotCanonicalize,
+ ConfigParseError,
+)
+
+from ._util import _config
+
+
+@fixture
+def socket():
+ """
+ Patch all of socket.* in our config module to prevent e.g. real DNS lookups.
+
+ Also forces getaddrinfo (used in our addressfamily lookup stuff) to always
+ fail by default to mimic usual lack of AddressFamily related crap.
+
+ Callers who want to mock DNS lookups can then safely assume gethostbyname()
+ will be in use.
+ """
+ with patch("paramiko.config.socket") as mocket:
+ # Reinstate gaierror as an actual exception and not a sub-mock.
+ # (Presumably this would work with any exception, but why not use the
+ # real one?)
+ mocket.gaierror = gaierror
+ # Patch out getaddrinfo, used to detect family-specific IP lookup -
+ # only useful for a few specific tests.
+ mocket.getaddrinfo.side_effect = mocket.gaierror
+ # Patch out getfqdn to return some real string for when it gets called;
+ # some code (eg tokenization) gets mad w/ MagicMocks
+ mocket.getfqdn.return_value = "some.fake.fqdn"
+ mocket.gethostname.return_value = "local.fake.fqdn"
+ yield mocket
+
+
+def load_config(name):
+ return SSHConfig.from_path(_config(name))
+
+
+class TestSSHConfig:
+ def setup(self):
+ self.config = load_config("robey")
+
+ def test_init(self):
+ # Constructor takes no positional args...
+ with raises(TypeError):
+ SSHConfig("uh oh!")
+ # ...and yields an empty config when given none.
+ assert not SSHConfig()._config
+
+ def test_from_text(self):
+ config = SSHConfig.from_text("User foo")
+ assert config.lookup("foo.example.com")["user"] == "foo"
+
+ def test_from_file(self):
+ with open(_config("robey")) as flo:
+ config = SSHConfig.from_file(flo)
+ assert config.lookup("whatever")["user"] == "robey"
+
+ def test_from_path(self):
+ # NOTE: DO NOT replace with use of load_config() :D
+ config = SSHConfig.from_path(_config("robey"))
+ assert config.lookup("meh.example.com")["port"] == "3333"
+
+ def test_parse_config(self):
+ expected = [
+ {"host": ["*"], "config": {}},
+ {
+ "host": ["*"],
+ "config": {"identityfile": ["~/.ssh/id_rsa"], "user": "robey"},
+ },
+ {
+ "host": ["*.example.com"],
+ "config": {"user": "bjork", "port": "3333"},
+ },
+ {"host": ["*"], "config": {"crazy": "something dumb"}},
+ {
+ "host": ["spoo.example.com"],
+ "config": {"crazy": "something else"},
+ },
+ ]
+ assert self.config._config == expected
+
+ @mark.parametrize(
+ "host,values",
+ (
+ (
+ "irc.danger.com",
+ {
+ "crazy": "something dumb",
+ "hostname": "irc.danger.com",
+ "user": "robey",
+ },
+ ),
+ (
+ "irc.example.com",
+ {
+ "crazy": "something dumb",
+ "hostname": "irc.example.com",
+ "user": "robey",
+ "port": "3333",
+ },
+ ),
+ (
+ "spoo.example.com",
+ {
+ "crazy": "something dumb",
+ "hostname": "spoo.example.com",
+ "user": "robey",
+ "port": "3333",
+ },
+ ),
+ ),
+ )
+ def test_host_config(self, host, values):
+ expected = dict(
+ values, hostname=host, identityfile=[expanduser("~/.ssh/id_rsa")]
+ )
+ assert self.config.lookup(host) == expected
+
+ def test_fabric_issue_33(self):
+ config = SSHConfig.from_text(
+ """
+Host www13.*
+ Port 22
+
+Host *.example.com
+ Port 2222
+
+Host *
+ Port 3333
+"""
+ )
+ host = "www13.example.com"
+ expected = {"hostname": host, "port": "22"}
+ assert config.lookup(host) == expected
+
+ def test_proxycommand_config_equals_parsing(self):
+ """
+ ProxyCommand should not split on equals signs within the value.
+ """
+ config = SSHConfig.from_text(
+ """
+Host space-delimited
+ ProxyCommand foo bar=biz baz
+
+Host equals-delimited
+ ProxyCommand=foo bar=biz baz
+"""
+ )
+ for host in ("space-delimited", "equals-delimited"):
+ value = config.lookup(host)["proxycommand"]
+ assert value == "foo bar=biz baz"
+
+ def test_proxycommand_interpolation(self):
+ """
+ ProxyCommand should perform interpolation on the value
+ """
+ config = SSHConfig.from_text(
+ """
+Host specific
+ Port 37
+ ProxyCommand host %h port %p lol
+
+Host portonly
+ Port 155
+
+Host *
+ Port 25
+ ProxyCommand host %h port %p
+"""
+ )
+ for host, val in (
+ ("foo.com", "host foo.com port 25"),
+ ("specific", "host specific port 37 lol"),
+ ("portonly", "host portonly port 155"),
+ ):
+ assert config.lookup(host)["proxycommand"] == val
+
+ def test_proxycommand_tilde_expansion(self):
+ """
+ Tilde (~) should be expanded inside ProxyCommand
+ """
+ config = SSHConfig.from_text(
+ """
+Host test
+ ProxyCommand ssh -F ~/.ssh/test_config bastion nc %h %p
+"""
+ )
+ expected = "ssh -F {}/.ssh/test_config bastion nc test 22".format(
+ expanduser("~")
+ )
+ got = config.lookup("test")["proxycommand"]
+ assert got == expected
+
+ @patch("paramiko.config.getpass")
+ def test_proxyjump_token_expansion(self, getpass):
+ getpass.getuser.return_value = "gandalf"
+ config = SSHConfig.from_text(
+ """
+Host justhost
+ ProxyJump jumpuser@%h
+Host userhost
+ ProxyJump %r@%h:222
+Host allcustom
+ ProxyJump %r@%h:%p
+"""
+ )
+ assert config.lookup("justhost")["proxyjump"] == "jumpuser@justhost"
+ assert config.lookup("userhost")["proxyjump"] == "gandalf@userhost:222"
+ assert (
+ config.lookup("allcustom")["proxyjump"] == "gandalf@allcustom:22"
+ )
+
+ @patch("paramiko.config.getpass")
+ def test_controlpath_token_expansion(self, getpass, socket):
+ getpass.getuser.return_value = "gandalf"
+ config = SSHConfig.from_text(
+ """
+Host explicit_user
+ User root
+ ControlPath user %u remoteuser %r
+
+Host explicit_host
+ HostName ohai
+ ControlPath remoteuser %r host %h orighost %n
+
+Host hashbrowns
+ ControlPath %C
+ """
+ )
+ result = config.lookup("explicit_user")["controlpath"]
+ # Remote user (%r) is the User value; local user (%u) is the running user
+ assert result == "user gandalf remoteuser root"
+ result = config.lookup("explicit_host")["controlpath"]
+ # Remote user falls back to local user; host and orighost may differ
+ assert result == "remoteuser gandalf host ohai orighost explicit_host"
+ # Supports %C
+ result = config.lookup("hashbrowns")["controlpath"]
+ assert result == "a438e7dbf5308b923aba9db8fe2ca63447ac8688"
+
+ def test_negation(self):
+ config = SSHConfig.from_text(
+ """
+Host www13.* !*.example.com
+ Port 22
+
+Host *.example.com !www13.*
+ Port 2222
+
+Host www13.*
+ Port 8080
+
+Host *
+ Port 3333
+"""
+ )
+ host = "www13.example.com"
+ expected = {"hostname": host, "port": "8080"}
+ assert config.lookup(host) == expected
+
+ def test_proxycommand(self):
+ config = SSHConfig.from_text(
+ """
+Host proxy-with-equal-divisor-and-space
+ProxyCommand = foo=bar
+
+Host proxy-with-equal-divisor-and-no-space
+ProxyCommand=foo=bar
+
+Host proxy-without-equal-divisor
+ProxyCommand foo=bar:%h-%p
+"""
+ )
+ for host, values in {
+ "proxy-with-equal-divisor-and-space": {
+ "hostname": "proxy-with-equal-divisor-and-space",
+ "proxycommand": "foo=bar",
+ },
+ "proxy-with-equal-divisor-and-no-space": {
+ "hostname": "proxy-with-equal-divisor-and-no-space",
+ "proxycommand": "foo=bar",
+ },
+ "proxy-without-equal-divisor": {
+ "hostname": "proxy-without-equal-divisor",
+ "proxycommand": "foo=bar:proxy-without-equal-divisor-22",
+ },
+ }.items():
+
+ assert config.lookup(host) == values
+
+ @patch("paramiko.config.getpass")
+ def test_identityfile(self, getpass, socket):
+ getpass.getuser.return_value = "gandalf"
+ config = SSHConfig.from_text(
+ """
+IdentityFile id_dsa0
+
+Host *
+IdentityFile id_dsa1
+
+Host dsa2
+IdentityFile id_dsa2
+
+Host dsa2*
+IdentityFile id_dsa22
+
+Host hashbrowns
+IdentityFile %C
+"""
+ )
+ for host, values in {
+ "foo": {"hostname": "foo", "identityfile": ["id_dsa0", "id_dsa1"]},
+ "dsa2": {
+ "hostname": "dsa2",
+ "identityfile": ["id_dsa0", "id_dsa1", "id_dsa2", "id_dsa22"],
+ },
+ "dsa22": {
+ "hostname": "dsa22",
+ "identityfile": ["id_dsa0", "id_dsa1", "id_dsa22"],
+ },
+ "hashbrowns": {
+ "hostname": "hashbrowns",
+ "identityfile": [
+ "id_dsa0",
+ "id_dsa1",
+ "a438e7dbf5308b923aba9db8fe2ca63447ac8688",
+ ],
+ },
+ }.items():
+ assert config.lookup(host) == values
+
+ def test_config_addressfamily_and_lazy_fqdn(self):
+ """
+ Ensure the code path honoring non-'all' AddressFamily doesn't asplode
+ """
+ config = SSHConfig.from_text(
+ """
+AddressFamily inet
+IdentityFile something_%l_using_fqdn
+"""
+ )
+ assert config.lookup(
+ "meh"
+ ) # will die during lookup() if bug regresses
+
+ def test_config_dos_crlf_succeeds(self):
+ config = SSHConfig.from_text(
+ """
+Host abcqwerty\r\nHostName 127.0.0.1\r\n
+"""
+ )
+ assert config.lookup("abcqwerty")["hostname"] == "127.0.0.1"
+
+ def test_get_hostnames(self):
+ expected = {"*", "*.example.com", "spoo.example.com"}
+ assert self.config.get_hostnames() == expected
+
+ def test_quoted_host_names(self):
+ config = SSHConfig.from_text(
+ """
+Host "param pam" param "pam"
+ Port 1111
+
+Host "param2"
+ Port 2222
+
+Host param3 parara
+ Port 3333
+
+Host param4 "p a r" "p" "par" para
+ Port 4444
+"""
+ )
+ res = {
+ "param pam": {"hostname": "param pam", "port": "1111"},
+ "param": {"hostname": "param", "port": "1111"},
+ "pam": {"hostname": "pam", "port": "1111"},
+ "param2": {"hostname": "param2", "port": "2222"},
+ "param3": {"hostname": "param3", "port": "3333"},
+ "parara": {"hostname": "parara", "port": "3333"},
+ "param4": {"hostname": "param4", "port": "4444"},
+ "p a r": {"hostname": "p a r", "port": "4444"},
+ "p": {"hostname": "p", "port": "4444"},
+ "par": {"hostname": "par", "port": "4444"},
+ "para": {"hostname": "para", "port": "4444"},
+ }
+ for host, values in res.items():
+ assert config.lookup(host) == values
+
+ def test_quoted_params_in_config(self):
+ config = SSHConfig.from_text(
+ """
+Host "param pam" param "pam"
+ IdentityFile id_rsa
+
+Host "param2"
+ IdentityFile "test rsa key"
+
+Host param3 parara
+ IdentityFile id_rsa
+ IdentityFile "test rsa key"
+"""
+ )
+ res = {
+ "param pam": {"hostname": "param pam", "identityfile": ["id_rsa"]},
+ "param": {"hostname": "param", "identityfile": ["id_rsa"]},
+ "pam": {"hostname": "pam", "identityfile": ["id_rsa"]},
+ "param2": {"hostname": "param2", "identityfile": ["test rsa key"]},
+ "param3": {
+ "hostname": "param3",
+ "identityfile": ["id_rsa", "test rsa key"],
+ },
+ "parara": {
+ "hostname": "parara",
+ "identityfile": ["id_rsa", "test rsa key"],
+ },
+ }
+ for host, values in res.items():
+ assert config.lookup(host) == values
+
+ def test_quoted_host_in_config(self):
+ conf = SSHConfig()
+ correct_data = {
+ "param": ["param"],
+ '"param"': ["param"],
+ "param pam": ["param", "pam"],
+ '"param" "pam"': ["param", "pam"],
+ '"param" pam': ["param", "pam"],
+ 'param "pam"': ["param", "pam"],
+ 'param "pam" p': ["param", "pam", "p"],
+ '"param" pam "p"': ["param", "pam", "p"],
+ '"pa ram"': ["pa ram"],
+ '"pa ram" pam': ["pa ram", "pam"],
+ 'param "p a m"': ["param", "p a m"],
+ }
+ incorrect_data = ['param"', '"param', 'param "pam', 'param "pam" "p a']
+ for host, values in correct_data.items():
+ assert conf._get_hosts(host) == values
+ for host in incorrect_data:
+ with raises(ConfigParseError):
+ conf._get_hosts(host)
+
+ def test_invalid_line_format_excepts(self):
+ with raises(ConfigParseError):
+ load_config("invalid")
+
+ def test_proxycommand_none_issue_415(self):
+ config = SSHConfig.from_text(
+ """
+Host proxycommand-standard-none
+ ProxyCommand None
+
+Host proxycommand-with-equals-none
+ ProxyCommand=None
+"""
+ )
+ for host, values in {
+ "proxycommand-standard-none": {
+ "hostname": "proxycommand-standard-none",
+ "proxycommand": None,
+ },
+ "proxycommand-with-equals-none": {
+ "hostname": "proxycommand-with-equals-none",
+ "proxycommand": None,
+ },
+ }.items():
+
+ assert config.lookup(host) == values
+
+ def test_proxycommand_none_masking(self):
+ # Re: https://github.com/paramiko/paramiko/issues/670
+ config = SSHConfig.from_text(
+ """
+Host specific-host
+ ProxyCommand none
+
+Host other-host
+ ProxyCommand other-proxy
+
+Host *
+ ProxyCommand default-proxy
+"""
+ )
+ # In versions <3.0, 'None' ProxyCommands got deleted, and this itself
+ # caused bugs. In 3.0, we more cleanly map "none" to None. This test
+ # has been altered accordingly but left around to ensure no
+ # regressions.
+ assert config.lookup("specific-host")["proxycommand"] is None
+ assert config.lookup("other-host")["proxycommand"] == "other-proxy"
+ cmd = config.lookup("some-random-host")["proxycommand"]
+ assert cmd == "default-proxy"
+
+ def test_hostname_tokenization(self):
+ result = load_config("hostname-tokenized").lookup("whatever")
+ assert result["hostname"] == "prefix.whatever"
+
+
+class TestSSHConfigDict:
+ def test_SSHConfigDict_construct_empty(self):
+ assert not SSHConfigDict()
+
+ def test_SSHConfigDict_construct_from_list(self):
+ assert SSHConfigDict([(1, 2)])[1] == 2
+
+ def test_SSHConfigDict_construct_from_dict(self):
+ assert SSHConfigDict({1: 2})[1] == 2
+
+ @mark.parametrize("true_ish", ("yes", "YES", "Yes", True))
+ def test_SSHConfigDict_as_bool_true_ish(self, true_ish):
+ assert SSHConfigDict({"key": true_ish}).as_bool("key") is True
+
+ @mark.parametrize("false_ish", ("no", "NO", "No", False))
+ def test_SSHConfigDict_as_bool(self, false_ish):
+ assert SSHConfigDict({"key": false_ish}).as_bool("key") is False
+
+ @mark.parametrize("int_val", ("42", 42))
+ def test_SSHConfigDict_as_int(self, int_val):
+ assert SSHConfigDict({"key": int_val}).as_int("key") == 42
+
+ @mark.parametrize("non_int", ("not an int", None, object()))
+ def test_SSHConfigDict_as_int_failures(self, non_int):
+ conf = SSHConfigDict({"key": non_int})
+
+ try:
+ int(non_int)
+ except Exception as e:
+ exception_type = type(e)
+
+ with raises(exception_type):
+ conf.as_int("key")
+
+ def test_SSHConfig_host_dicts_are_SSHConfigDict_instances(self):
+ config = SSHConfig.from_text(
+ """
+Host *.example.com
+ Port 2222
+
+Host *
+ Port 3333
+"""
+ )
+ assert config.lookup("foo.example.com").as_int("port") == 2222
+
+ def test_SSHConfig_wildcard_host_dicts_are_SSHConfigDict_instances(self):
+ config = SSHConfig.from_text(
+ """
+Host *.example.com
+ Port 2222
+
+Host *
+ Port 3333
+"""
+ )
+ assert config.lookup("anything-else").as_int("port") == 3333
+
+
+class TestHostnameCanonicalization:
+ # NOTE: this class uses on-disk configs, and ones with real (at time of
+ # writing) DNS names, so that one can easily test OpenSSH's behavior using
+ # "ssh -F path/to/file.config -G <target>".
+
+ def test_off_by_default(self, socket):
+ result = load_config("basic").lookup("www")
+ assert result["hostname"] == "www"
+ assert "user" not in result
+ assert not socket.gethostbyname.called
+
+ def test_explicit_no_same_as_default(self, socket):
+ result = load_config("no-canon").lookup("www")
+ assert result["hostname"] == "www"
+ assert "user" not in result
+ assert not socket.gethostbyname.called
+
+ @mark.parametrize(
+ "config_name",
+ ("canon", "canon-always", "canon-local", "canon-local-always"),
+ )
+ def test_canonicalization_base_cases(self, socket, config_name):
+ result = load_config(config_name).lookup("www")
+ assert result["hostname"] == "www.paramiko.org"
+ assert result["user"] == "rando"
+ socket.gethostbyname.assert_called_once_with("www.paramiko.org")
+
+ def test_uses_getaddrinfo_when_AddressFamily_given(self, socket):
+ # Undo default 'always fails' mock
+ socket.getaddrinfo.side_effect = None
+ socket.getaddrinfo.return_value = [True] # just need 1st value truthy
+ result = load_config("canon-ipv4").lookup("www")
+ assert result["hostname"] == "www.paramiko.org"
+ assert result["user"] == "rando"
+ assert not socket.gethostbyname.called
+ gai_args = socket.getaddrinfo.call_args[0]
+ assert gai_args[0] == "www.paramiko.org"
+ assert gai_args[2] is socket.AF_INET # Mocked, but, still useful
+
+ @mark.skip
+ def test_empty_CanonicalDomains_canonicalizes_despite_noop(self, socket):
+ # Confirmed this is how OpenSSH behaves as well. Bit silly, but.
+ # TODO: this requires modifying SETTINGS_REGEX, which is a mite scary
+ # (honestly I'd prefer to move to a real parser lib anyhow) and since
+ # this is a very dumb corner case, it's marked skip for now.
+ result = load_config("empty-canon").lookup("www")
+ assert result["hostname"] == "www" # no paramiko.org
+ assert "user" not in result # did not discover canonicalized block
+
+ def test_CanonicalDomains_may_be_set_to_space_separated_list(self, socket):
+ # Test config has a bogus domain, followed by paramiko.org
+ socket.gethostbyname.side_effect = [socket.gaierror, True]
+ result = load_config("multi-canon-domains").lookup("www")
+ assert result["hostname"] == "www.paramiko.org"
+ assert result["user"] == "rando"
+ assert [x[0][0] for x in socket.gethostbyname.call_args_list] == [
+ "www.not-a-real-tld",
+ "www.paramiko.org",
+ ]
+
+ def test_canonicalization_applies_to_single_dot_by_default(self, socket):
+ result = load_config("deep-canon").lookup("sub.www")
+ assert result["hostname"] == "sub.www.paramiko.org"
+ assert result["user"] == "deep"
+
+ def test_canonicalization_not_applied_to_two_dots_by_default(self, socket):
+ result = load_config("deep-canon").lookup("subber.sub.www")
+ assert result["hostname"] == "subber.sub.www"
+ assert "user" not in result
+
+ def test_hostname_depth_controllable_with_max_dots_directive(self, socket):
+ # This config sets MaxDots of 2, so now canonicalization occurs
+ result = load_config("deep-canon-maxdots").lookup("subber.sub.www")
+ assert result["hostname"] == "subber.sub.www.paramiko.org"
+ assert result["user"] == "deeper"
+
+ def test_max_dots_may_be_zero(self, socket):
+ result = load_config("zero-maxdots").lookup("sub.www")
+ assert result["hostname"] == "sub.www"
+ assert "user" not in result
+
+ def test_fallback_yes_does_not_canonicalize_or_error(self, socket):
+ socket.gethostbyname.side_effect = socket.gaierror
+ result = load_config("fallback-yes").lookup("www")
+ assert result["hostname"] == "www"
+ assert "user" not in result
+
+ def test_fallback_no_causes_errors_for_unresolvable_names(self, socket):
+ socket.gethostbyname.side_effect = socket.gaierror
+ with raises(CouldNotCanonicalize) as info:
+ load_config("fallback-no").lookup("doesnotexist")
+ assert str(info.value) == "doesnotexist"
+
+ def test_identityfile_continues_being_appended_to(self, socket):
+ result = load_config("canon").lookup("www")
+ assert result["identityfile"] == ["base.key", "canonicalized.key"]
+
+
+@mark.skip
+class TestCanonicalizationOfCNAMEs:
+ def test_permitted_cnames_may_be_one_to_one_mapping(self):
+ # CanonicalizePermittedCNAMEs *.foo.com:*.bar.com
+ pass
+
+ def test_permitted_cnames_may_be_one_to_many_mapping(self):
+ # CanonicalizePermittedCNAMEs *.foo.com:*.bar.com,*.biz.com
+ pass
+
+ def test_permitted_cnames_may_be_many_to_one_mapping(self):
+ # CanonicalizePermittedCNAMEs *.foo.com,*.bar.com:*.biz.com
+ pass
+
+ def test_permitted_cnames_may_be_many_to_many_mapping(self):
+ # CanonicalizePermittedCNAMEs *.foo.com,*.bar.com:*.biz.com,*.baz.com
+ pass
+
+ def test_permitted_cnames_may_be_multiple_mappings(self):
+ # CanonicalizePermittedCNAMEs *.foo.com,*.bar.com *.biz.com:*.baz.com
+ pass
+
+ def test_permitted_cnames_may_be_multiple_complex_mappings(self):
+ # Same as prev but with multiple patterns on both ends in both args
+ pass
+
+
+class TestMatchAll:
+ def test_always_matches(self):
+ result = load_config("match-all").lookup("general")
+ assert result["user"] == "awesome"
+
+ def test_may_not_mix_with_non_canonical_keywords(self):
+ for config in ("match-all-and-more", "match-all-and-more-before"):
+ with raises(ConfigParseError):
+ load_config(config).lookup("whatever")
+
+ def test_may_come_after_canonical(self, socket):
+ result = load_config("match-all-after-canonical").lookup("www")
+ assert result["user"] == "awesome"
+
+ def test_may_not_come_before_canonical(self, socket):
+ with raises(ConfigParseError):
+ load_config("match-all-before-canonical")
+
+ def test_after_canonical_not_loaded_when_non_canonicalized(self, socket):
+ result = load_config("match-canonical-no").lookup("a-host")
+ assert "user" not in result
+
+
+def _expect(success_on):
+ """
+ Returns a side_effect-friendly Invoke success result for given command(s).
+
+ Ensures that any other commands fail; this is useful for testing 'Match
+ exec' because it means all other such clauses under test act like no-ops.
+
+ :param success_on:
+ Single string or list of strings, noting commands that should appear to
+ succeed.
+ """
+ if isinstance(success_on, str):
+ success_on = [success_on]
+
+ def inner(command, *args, **kwargs):
+ # Sanity checking - we always expect that invoke.run is called with
+ # these.
+ assert kwargs.get("hide", None) == "stdout"
+ assert kwargs.get("warn", None) is True
+ # Fake exit
+ exit = 0 if command in success_on else 1
+ return Result(exited=exit)
+
+ return inner
+
+
+@mark.skipif(Result is None, reason="requires invoke package")
+class TestMatchExec:
+ @patch("paramiko.config.invoke", new=None)
+ @patch("paramiko.config.invoke_import_error", new=ImportError("meh"))
+ def test_raises_invoke_ImportErrors_at_runtime(self):
+ # Not an ideal test, but I don't know of a non-bad way to fake out
+ # module-time ImportErrors. So we mock the symptoms. Meh!
+ with raises(ImportError) as info:
+ load_config("match-exec").lookup("oh-noes")
+ assert str(info.value) == "meh"
+
+ @patch("paramiko.config.invoke.run")
+ @mark.parametrize(
+ "cmd,user",
+ [
+ ("unquoted", "rando"),
+ ("quoted", "benjamin"),
+ ("quoted spaced", "neil"),
+ ],
+ )
+ def test_accepts_single_possibly_quoted_argument(self, run, cmd, user):
+ run.side_effect = _expect(cmd)
+ result = load_config("match-exec").lookup("whatever")
+ assert result["user"] == user
+
+ @patch("paramiko.config.invoke.run")
+ def test_does_not_match_nonzero_exit_codes(self, run):
+ # Nothing will succeed -> no User ever gets loaded
+ run.return_value = Result(exited=1)
+ result = load_config("match-exec").lookup("whatever")
+ assert "user" not in result
+
+ @patch("paramiko.config.getpass")
+ @patch("paramiko.config.invoke.run")
+ def test_tokenizes_argument(self, run, getpass, socket):
+ getpass.getuser.return_value = "gandalf"
+ # Actual exec value is "%C %d %h %L %l %n %p %r %u"
+ parts = (
+ "bf5ba06778434a9384ee4217e462f64888bd0cd2",
+ expanduser("~"),
+ "configured",
+ "local",
+ "some.fake.fqdn",
+ "target",
+ "22",
+ "intermediate",
+ "gandalf",
+ )
+ run.side_effect = _expect(" ".join(parts))
+ result = load_config("match-exec").lookup("target")
+ assert result["port"] == "1337"
+
+ @patch("paramiko.config.invoke.run")
+ def test_works_with_canonical(self, run, socket):
+ # Ensure both stanzas' exec components appear to match
+ run.side_effect = _expect(["uncanonicalized", "canonicalized"])
+ result = load_config("match-exec-canonical").lookup("who-cares")
+ # Prove both config values got loaded up, across the two passes
+ assert result["user"] == "defenseless"
+ assert result["port"] == "8007"
+
+ @patch("paramiko.config.invoke.run")
+ def test_may_be_negated(self, run):
+ run.side_effect = _expect("this succeeds")
+ result = load_config("match-exec-negation").lookup("so-confusing")
+ # If negation did not work, the first of the two Match exec directives
+ # would have set User to 'nope' (and/or the second would have NOT set
+ # User to 'yup')
+ assert result["user"] == "yup"
+
+ def test_requires_an_argument(self):
+ with raises(ConfigParseError):
+ load_config("match-exec-no-arg")
+
+ @patch("paramiko.config.invoke.run")
+ def test_works_with_tokenized_hostname(self, run):
+ run.side_effect = _expect("ping target")
+ result = load_config("hostname-exec-tokenized").lookup("target")
+ assert result["hostname"] == "pingable.target"
+
+
+class TestMatchHost:
+ def test_matches_target_name_when_no_hostname(self):
+ result = load_config("match-host").lookup("target")
+ assert result["user"] == "rand"
+
+ def test_matches_hostname_from_global_setting(self):
+ # Also works for ones set in regular Host stanzas
+ result = load_config("match-host-name").lookup("anything")
+ assert result["user"] == "silly"
+
+ def test_matches_hostname_from_earlier_match(self):
+ # Corner case: one Match matches original host, sets HostName,
+ # subsequent Match matches the latter.
+ result = load_config("match-host-from-match").lookup("original-host")
+ assert result["user"] == "inner"
+
+ def test_may_be_globbed(self):
+ result = load_config("match-host-glob-list").lookup("whatever")
+ assert result["user"] == "matrim"
+
+ def test_may_be_comma_separated_list(self):
+ for target in ("somehost", "someotherhost"):
+ result = load_config("match-host-glob-list").lookup(target)
+ assert result["user"] == "thom"
+
+ def test_comma_separated_list_may_have_internal_negation(self):
+ conf = load_config("match-host-glob-list")
+ assert conf.lookup("good")["user"] == "perrin"
+ assert "user" not in conf.lookup("goof")
+
+ def test_matches_canonicalized_name(self, socket):
+ # Without 'canonical' explicitly declared, mind.
+ result = load_config("match-host-canonicalized").lookup("www")
+ assert result["user"] == "rand"
+
+ def test_works_with_canonical_keyword(self, socket):
+ # NOTE: distinct from 'happens to be canonicalized' above
+ result = load_config("match-host-canonicalized").lookup("docs")
+ assert result["user"] == "eric"
+
+ def test_may_be_negated(self):
+ conf = load_config("match-host-negated")
+ assert conf.lookup("docs")["user"] == "jeff"
+ assert "user" not in conf.lookup("www")
+
+ def test_requires_an_argument(self):
+ with raises(ConfigParseError):
+ load_config("match-host-no-arg")
+
+
+class TestMatchOriginalHost:
+ def test_matches_target_host_not_hostname(self):
+ result = load_config("match-orighost").lookup("target")
+ assert result["hostname"] == "bogus"
+ assert result["user"] == "tuon"
+
+ def test_matches_target_host_not_canonicalized_name(self, socket):
+ result = load_config("match-orighost-canonical").lookup("www")
+ assert result["hostname"] == "www.paramiko.org"
+ assert result["user"] == "tuon"
+
+ def test_may_be_globbed(self):
+ result = load_config("match-orighost").lookup("whatever")
+ assert result["user"] == "matrim"
+
+ def test_may_be_comma_separated_list(self):
+ for target in ("comma", "separated"):
+ result = load_config("match-orighost").lookup(target)
+ assert result["user"] == "chameleon"
+
+ def test_comma_separated_list_may_have_internal_negation(self):
+ result = load_config("match-orighost").lookup("nope")
+ assert "user" not in result
+
+ def test_may_be_negated(self):
+ result = load_config("match-orighost").lookup("docs")
+ assert result["user"] == "thom"
+
+ def test_requires_an_argument(self):
+ with raises(ConfigParseError):
+ load_config("match-orighost-no-arg")
+
+
+class TestMatchUser:
+ def test_matches_configured_username(self):
+ result = load_config("match-user-explicit").lookup("anything")
+ assert result["hostname"] == "dumb"
+
+ @patch("paramiko.config.getpass.getuser")
+ def test_matches_local_username_by_default(self, getuser):
+ getuser.return_value = "gandalf"
+ result = load_config("match-user").lookup("anything")
+ assert result["hostname"] == "gondor"
+
+ @patch("paramiko.config.getpass.getuser")
+ def test_may_be_globbed(self, getuser):
+ for user in ("bilbo", "bombadil"):
+ getuser.return_value = user
+ result = load_config("match-user").lookup("anything")
+ assert result["hostname"] == "shire"
+
+ @patch("paramiko.config.getpass.getuser")
+ def test_may_be_comma_separated_list(self, getuser):
+ for user in ("aragorn", "frodo"):
+ getuser.return_value = user
+ result = load_config("match-user").lookup("anything")
+ assert result["hostname"] == "moria"
+
+ @patch("paramiko.config.getpass.getuser")
+ def test_comma_separated_list_may_have_internal_negation(self, getuser):
+ getuser.return_value = "legolas"
+ result = load_config("match-user").lookup("anything")
+ assert "port" not in result
+ getuser.return_value = "gimli"
+ result = load_config("match-user").lookup("anything")
+ assert result["port"] == "7373"
+
+ @patch("paramiko.config.getpass.getuser")
+ def test_may_be_negated(self, getuser):
+ getuser.return_value = "saruman"
+ result = load_config("match-user").lookup("anything")
+ assert result["hostname"] == "mordor"
+
+ def test_requires_an_argument(self):
+ with raises(ConfigParseError):
+ load_config("match-user-no-arg")
+
+
+# NOTE: highly derivative of previous suite due to the former's use of
+# localuser fallback. Doesn't seem worth conflating/refactoring right now.
+class TestMatchLocalUser:
+ @patch("paramiko.config.getpass.getuser")
+ def test_matches_local_username(self, getuser):
+ getuser.return_value = "gandalf"
+ result = load_config("match-localuser").lookup("anything")
+ assert result["hostname"] == "gondor"
+
+ @patch("paramiko.config.getpass.getuser")
+ def test_may_be_globbed(self, getuser):
+ for user in ("bilbo", "bombadil"):
+ getuser.return_value = user
+ result = load_config("match-localuser").lookup("anything")
+ assert result["hostname"] == "shire"
+
+ @patch("paramiko.config.getpass.getuser")
+ def test_may_be_comma_separated_list(self, getuser):
+ for user in ("aragorn", "frodo"):
+ getuser.return_value = user
+ result = load_config("match-localuser").lookup("anything")
+ assert result["hostname"] == "moria"
+
+ @patch("paramiko.config.getpass.getuser")
+ def test_comma_separated_list_may_have_internal_negation(self, getuser):
+ getuser.return_value = "legolas"
+ result = load_config("match-localuser").lookup("anything")
+ assert "port" not in result
+ getuser.return_value = "gimli"
+ result = load_config("match-localuser").lookup("anything")
+ assert result["port"] == "7373"
+
+ @patch("paramiko.config.getpass.getuser")
+ def test_may_be_negated(self, getuser):
+ getuser.return_value = "saruman"
+ result = load_config("match-localuser").lookup("anything")
+ assert result["hostname"] == "mordor"
+
+ def test_requires_an_argument(self):
+ with raises(ConfigParseError):
+ load_config("match-localuser-no-arg")
+
+
+class TestComplexMatching:
+ # NOTE: this is still a cherry-pick of a few levels of complexity, there's
+ # no point testing literally all possible combinations.
+
+ def test_originalhost_host(self):
+ result = load_config("match-complex").lookup("target")
+ assert result["hostname"] == "bogus"
+ assert result["user"] == "rand"
+
+ @patch("paramiko.config.getpass.getuser")
+ def test_originalhost_localuser(self, getuser):
+ getuser.return_value = "rando"
+ result = load_config("match-complex").lookup("remote")
+ assert result["user"] == "calrissian"
+
+ @patch("paramiko.config.getpass.getuser")
+ def test_everything_but_all(self, getuser):
+ getuser.return_value = "rando"
+ result = load_config("match-complex").lookup("www")
+ assert result["port"] == "7777"
+
+ @patch("paramiko.config.getpass.getuser")
+ def test_everything_but_all_with_some_negated(self, getuser):
+ getuser.return_value = "rando"
+ result = load_config("match-complex").lookup("docs")
+ assert result["port"] == "1234"
+
+ def test_negated_canonical(self, socket):
+ # !canonical in a config that is not canonicalized - does match
+ result = load_config("match-canonical-no").lookup("specific")
+ assert result["user"] == "overload"
+ # !canonical in a config that is canonicalized - does NOT match
+ result = load_config("match-canonical-yes").lookup("www")
+ assert result["user"] == "hidden"
+
+
+class TestFinalMatching:
+ def test_finally(self):
+ result = load_config("match-final").lookup("finally")
+ assert result["proxyjump"] == "jump"
+ assert result["port"] == "1001"
+
+ def test_default_port(self):
+ result = load_config("match-final").lookup("default-port")
+ assert result["proxyjump"] == "jump"
+ assert result["port"] == "1002"
+
+ def test_negated(self):
+ result = load_config("match-final").lookup("jump")
+ assert result["port"] == "1003"
diff --git a/tests/test_dss_openssh.key b/tests/test_dss_openssh.key
new file mode 100644
index 0000000..2a9f892
--- /dev/null
+++ b/tests/test_dss_openssh.key
@@ -0,0 +1,22 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAACmFlczI1Ni1jdHIAAAAGYmNyeXB0AAAAGAAAABAsyq4pxL
+R5sOprPDHGpvzxAAAAEAAAAAEAAAGxAAAAB3NzaC1kc3MAAACBAL8XEx7F9xuwBNles+vW
+pNF+YcofrBhjX1r5QhpBe0eoYWLHRcroN6lxwCdGYRfgOoRjTncBiixQX/uUxAY96zDh3i
+r492s2BcJt4ihvNn/AY0I0OTuX/2IwGk9CGzafjaeZNVYxMa8lcVt0hSOTjkPQ7gVuk6bJ
+zMInvie+VWKLAAAAFQDUgYdY+rhR0SkKbC09BS/SIHcB+wAAAIB44+4zpCNcd0CGvZlowH
+99zyPX8uxQtmTLQFuR2O8O0FgVVuCdDgD0D9W8CLOp32oatpM0jyyN89EdvSWzjHzZJ+L6
+H1FtZps7uhpDFWHdva1R25vyGecLMUuXjo5t/D7oCDih+HwHoSAxoi0QvsPd8/qqHQVznN
+JKtR6thUpXEwAAAIAG4DCBjbgTTgpBw0egRkJwBSz0oTt+1IcapNU2jA6N8urMSk9YXHEQ
+HKN68BAF3YJ59q2Ujv3LOXmBqGd1T+kzwUszfMlgzq8MMu19Yfzse6AIK1Agn1Vj6F7YXL
+sXDN+T4KszX5+FJa7t/Zsp3nALWy6l0f4WKivEF5Y2QpEFcQAAAgCH6XUl1hYWB6kgCSHV
+a4C+vQHrgFNgNwEQnE074LXHXlAhxC+Dm8XTGqVPX1KRPWzadq9/+v6pqLFqiRueB86uRb
+J5WtAbUs3WwxAaC5Mi+mn42MBfL9PIwWPWCvstrAq9Nyj3EBMeX3XFLxN3RuGXIQnY/5rF
+f5hriUVxhWDQGIVbBKhkpn7Geqg6nLpn7iqQhzFmFGjPmAdrllgdVGJRLyIN6BRsaltDdy
+vxufkvGzKudvQ85QvsaoFJQ6K1d0S7907pexvxmWpcO7zchXb6i09BITWOAKIcHpVkbNQw
++8pzSdpggsAwCRbfk/Jkezz8sXVUCfmmJ23NFUw04/0ZbilCADRsUaPfafgVPeDznBnuCm
+tfXa4JSrVUvPdwoex3SKZmYsFXwsuOEQnFkhUGHfWwTbmOmxzy6dtC24KYhnWG5OGFVJXh
+3B8jQJGGs2ANfusI/Z0o15tAnQy5fqsLf9TT3RX7RG2ujIiDBsU+A1g//IXmSxxkUOQMZs
+v+cMI8KfODAXmQtB30+yAgoV03Zb/bdptv+HqPT4eeecstJUxzEGYADt1mDq3uV7fQbNmo
+80bppU52JjztrJb7hBmXsXHPRRK6spQ1FCatqvu1ggZeXZpEifNsHeqCljt87ueXsQsORY
+pvhLzjTbTKZmjLDPuB+GxUNLEKh1ZNyAqKng==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/test_dss_password.key b/tests/test_dss_password.key
new file mode 100644
index 0000000..e2a9bc5
--- /dev/null
+++ b/tests/test_dss_password.key
@@ -0,0 +1,15 @@
+-----BEGIN DSA PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: DES-EDE3-CBC,78DAEB836ED0A646
+
+ldWkq9OMlXqWmjIqppNnmNPIUj5uVT12LkBosTApTbibTme3kIJb1uDeG2BShVfY
++vDOTUE9koGPDLsxW1t5At+EVyIDK8aIO0uHteXM5AbBX20LLUWRbRVqZhsMxqQh
+3H3XlHiN+QhaWcb4fFuu18a8SkimTFpDnZuffoCDl/zh/B7XieARTLA805K/ZgVB
+BBwflkR2BE053XHrJAIx9BEUlLP76Fo18rvjLZOSeu3s+VnnhqUb5FCt5h50a46u
+YXQBbo2r9Zo1ilGMNEXJO0gk5hwGVmTySz53NkPA5HmWt8NIzv5jQHMDy7N+ZykF
+uwpP1R5M/ZIFY4Y5h/lvn6IJjQ7VySRPIbpN8o2YJv2OD1Ja80n3tU8Mg77o3o4d
+NwKm7cCjlq+FuIBdOsSgsB8FPQRUhW+jpFDxmWN64DM2cEg6RUdptby7WmMp0HwK
+1qyEfxHjLMuDVlD7lASIDBrRlUjPtXEH1DzIYQuYaRZaixFoZ7EY+X73TwmrKFEU
+US9ZnQZtRtroRqGwR4fz4wQQsjTl/AmOijlBmi29taJccJsT/THrLQ5plOEd8OMv
+9FsaPJXBU85gaRKo3JZtrw==
+-----END DSA PRIVATE KEY-----
diff --git a/tests/test_ecdsa_384.key b/tests/test_ecdsa_384.key
new file mode 100644
index 0000000..796bf41
--- /dev/null
+++ b/tests/test_ecdsa_384.key
@@ -0,0 +1,6 @@
+-----BEGIN EC PRIVATE KEY-----
+MIGkAgEBBDBDdO8IXvlLJgM7+sNtPl7tI7FM5kzuEUEEPRjXIPQM7mISciwJPBt+
+y43EuG8nL4mgBwYFK4EEACKhZANiAAQWxom0C1vQAGYhjdoREMVmGKBWlisDdzyk
+mgyUjKpiJ9WfbIEVLsPGP8OdNjhr1y/8BZNIts+dJd6VmYw+4HzB+4F+U1Igs8K0
+JEvh59VNkvWheViadDXCM2MV8Nq+DNg=
+-----END EC PRIVATE KEY-----
diff --git a/tests/test_ecdsa_384_openssh.key b/tests/test_ecdsa_384_openssh.key
new file mode 100644
index 0000000..8a160ce
--- /dev/null
+++ b/tests/test_ecdsa_384_openssh.key
@@ -0,0 +1,11 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAACmFlczI1Ni1jdHIAAAAGYmNyeXB0AAAAGAAAABDwIHkBEZ
+75XuqQS6/7daAIAAAAEAAAAAEAAACIAAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlz
+dHAzODQAAABhBIch5LXTq/L/TWsTGG6dIktxD8DIMh7EfvoRmWsks6CuNDTvFvbQNtY4QO
+1mn5OXegHbS0M5DPIS++wpKGFP3suDEH08O35vZQasLNrL0tO2jyyEnzB2ZEx3PPYci811
+ygAAAOBKGxFl+JcMHjldOdTA9iwv88gxoelCwln/NATglUuyzHMLJwx53n8NLqrnHALvbz
+RHjyTmjU4dbSM9o9Vjhcvq+1aipjAQg2qx825f7T4BMoKyhLBS/qTg7RfyW/h0Sbequ1wl
+PhBfwhv0LUphRFsGdnOgrXWfZqWqxOP1WhJWIh1p+ja5va/Ii/+hD6RORQjvzbHTPJA53c
+OguISImkx0vdqPuFTLyclaC3eO4Px68Ki0b8cdyivExbAWLkNOtBdIAgeO7Egbruu4O5Sn
+I6bn1Kc+kZlWtO02IkwSA5DaKw==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/test_ecdsa_521.key b/tests/test_ecdsa_521.key
new file mode 100644
index 0000000..b87dc90
--- /dev/null
+++ b/tests/test_ecdsa_521.key
@@ -0,0 +1,7 @@
+-----BEGIN EC PRIVATE KEY-----
+MIHcAgEBBEIAprQtAS3OF6iVUkT8IowTHWicHzShGgk86EtuEXvfQnhZFKsWm6Jo
+iqAr1yEaiuI9LfB3Xs8cjuhgEEfbduYr/f6gBwYFK4EEACOhgYkDgYYABACaOaFL
+ZGuxa5AW16qj6VLypFbLrEWrt9AZUloCMefxO8bNLjK/O5g0rAVasar1TnyHE9qj
+4NwzANZASWjQNbc4MAG8vzqezFwLIn/kNyNTsXNfqEko9OgHZknlj2Z79dwTJcRA
+L4QLcT5aND0EHZLB2fAUDXiWIb2j4rg1mwPlBMiBXA==
+-----END EC PRIVATE KEY-----
diff --git a/tests/test_ecdsa_password_256.key b/tests/test_ecdsa_password_256.key
new file mode 100644
index 0000000..eb7910e
--- /dev/null
+++ b/tests/test_ecdsa_password_256.key
@@ -0,0 +1,8 @@
+-----BEGIN EC PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: AES-128-CBC,EEB56BC745EDB2DE04FC3FE1F8DA387E
+
+wdt7QTCa6ahTJLaEPH7NhHyBcxhzrzf93d4UwQOuAhkM6//jKD4lF9fErHBW0f3B
+ExberCU3UxfEF3xX2thXiLw47JgeOCeQUlqRFx92p36k6YmfNGX6W8CsZ3d+XodF
+Z+pb6m285CiSX+W95NenFMexXFsIpntiCvTifTKJ8os=
+-----END EC PRIVATE KEY-----
diff --git a/tests/test_ecdsa_password_384.key b/tests/test_ecdsa_password_384.key
new file mode 100644
index 0000000..eba33c1
--- /dev/null
+++ b/tests/test_ecdsa_password_384.key
@@ -0,0 +1,9 @@
+-----BEGIN EC PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: AES-128-CBC,7F7B5DBE4CE040D822441AFE7A023A1D
+
+y/d6tGonAXYgJniQoFCdto+CuT1y1s41qzwNLN9YdNq/+R/dtQvZAaOuGtHJRFE6
+wWabhY1bSjavVPT2z1Zw1jhDJX5HGrf9LDoyORKtUWtUJoUvGdYLHbcg8Q+//WRf
+R0A01YuSw1SJX0a225S1aRcsDAk1k5F8EMb8QzSSDgjAOI8ldQF35JI+ofNSGjgS
+BPOlorQXTJxDOGmokw/Wql6MbhajXKPO39H2Z53W88U=
+-----END EC PRIVATE KEY-----
diff --git a/tests/test_ecdsa_password_521.key b/tests/test_ecdsa_password_521.key
new file mode 100644
index 0000000..5986b93
--- /dev/null
+++ b/tests/test_ecdsa_password_521.key
@@ -0,0 +1,10 @@
+-----BEGIN EC PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: AES-128-CBC,AEB2DE62C65D1A88C4940A3476B2F10A
+
+5kNk/FFPbHa0402QTrgpIT28uirJ4Amvb2/ryOEyOCe0NPbTLCqlQekj2RFYH2Un
+pgCLUDkelKQv4pyuK8qWS7R+cFjE/gHHCPUWkK3djZUC8DKuA9lUKeQIE+V1vBHc
+L5G+MpoYrPgaydcGx/Uqnc/kVuZx1DXLwrGGtgwNROVBtmjXC9EdfeXHLL1y0wvH
+paNgacJpUtgqJEmiehf7eL/eiReegG553rZK3jjfboGkREUaKR5XOgamiKUtgKoc
+sMpImVYCsRKd/9RI+VOqErZaEvy/9j0Ye3iH32wGOaA=
+-----END EC PRIVATE KEY-----
diff --git a/tests/test_ed25519-funky-padding.key b/tests/test_ed25519-funky-padding.key
new file mode 100644
index 0000000..f178ca4
--- /dev/null
+++ b/tests/test_ed25519-funky-padding.key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACAHzPvYoDSkMVX52/CbA2M2aSBS7R0wt/9b2n5n+osNygAAAJAHZ1meB2dZ
+ngAAAAtzc2gtZWQyNTUxOQAAACAHzPvYoDSkMVX52/CbA2M2aSBS7R0wt/9b2n5n+osNyg
+AAAEAIyamvYUpzCovQuUtLhz+fwE4qYQo+rTuUVIX4fmTzMAfM+9igNKQxVfnb8JsDYzZp
+IFLtHTC3/1vafmf6iw3KAAAADW15IGNvbW1lbnQgaXM=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/test_ed25519-funky-padding_password.key b/tests/test_ed25519-funky-padding_password.key
new file mode 100644
index 0000000..1b135d6
--- /dev/null
+++ b/tests/test_ed25519-funky-padding_password.key
@@ -0,0 +1,8 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAACmFlczI1Ni1jdHIAAAAGYmNyeXB0AAAAGAAAABDo3dGRlE
+xKndv32nDnz2mHAAAAEAAAAAEAAAAzAAAAC3NzaC1lZDI1NTE5AAAAIDcAVH8yDxoiqj0O
+rX3YTRMsnvJr+XdKJW16YQpxx8UvAAAAoI78IY+u8lYOzxAEO2N8qEVQH8b/m27yQhcSbK
+q1RvvuHmql3NoQvjYQe9/om4oqE+uesNRnoQGNplBHCeroD3ZcksXhLGDhwTh577NR+NQ+
+GNYAK5Ex7Va3Xgao5HUYtBQXlXbtzY1Q+71hcOlRVNnLUDvwShdCa9o6ETIOGcZl04fbzv
+Z3vC1C68G3+JMNFenAGYU+iQq0XENtpT6xAIU=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/test_ed25519_password.key b/tests/test_ed25519_password.key
new file mode 100644
index 0000000..d178aaa
--- /dev/null
+++ b/tests/test_ed25519_password.key
@@ -0,0 +1,8 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAACmFlczI1Ni1jYmMAAAAGYmNyeXB0AAAAGAAAABDaKD4ac7
+kieb+UfXaLaw68AAAAEAAAAAEAAAAzAAAAC3NzaC1lZDI1NTE5AAAAIOQn7fjND5ozMSV3
+CvbEtIdT73hWCMRjzS/lRdUDw50xAAAAsE8kLGyYBnl9ihJNqv378y6mO3SkzrDbWXOnK6
+ij0vnuTAvcqvWHAnyu6qBbplu/W2m55ZFeAItgaEcV2/V76sh/sAKlERqrLFyXylN0xoOW
+NU5+zU08aTlbSKGmeNUU2xE/xfJq12U9XClIRuVUkUpYANxNPbmTRpVrbD3fgXMhK97Jrb
+DEn8ca1IqMPiYmd/hpe5+tq3OxyRljXjCUFWTnqkp9VvUdzSTdSGZHsW9i
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/test_file.py b/tests/test_file.py
new file mode 100644
index 0000000..9344495
--- /dev/null
+++ b/tests/test_file.py
@@ -0,0 +1,226 @@
+# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Some unit tests for the BufferedFile abstraction.
+"""
+
+import unittest
+from io import BytesIO
+
+from paramiko.common import linefeed_byte, crlf, cr_byte
+from paramiko.file import BufferedFile
+
+from ._util import needs_builtin
+
+
+class LoopbackFile(BufferedFile):
+ """
+ BufferedFile object that you can write data into, and then read it back.
+ """
+
+ def __init__(self, mode="r", bufsize=-1):
+ BufferedFile.__init__(self)
+ self._set_mode(mode, bufsize)
+ self.buffer = BytesIO()
+ self.offset = 0
+
+ def _read(self, size):
+ data = self.buffer.getvalue()[self.offset : self.offset + size]
+ self.offset += len(data)
+ return data
+
+ def _write(self, data):
+ self.buffer.write(data)
+ return len(data)
+
+
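+# A minimal usage sketch of LoopbackFile, assuming the default (unbuffered)
+# bufsize: writes pass straight through to the internal BytesIO, so they can
+# be read back immediately.
+def _loopback_round_trip_example():
+    f = LoopbackFile("r+")
+    f.write(b"hello\n")
+    assert f.readline() == "hello\n"
+    f.close()
+
+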
+class BufferedFileTest(unittest.TestCase):
+ def test_simple(self):
+ f = LoopbackFile("r")
+ try:
+ f.write(b"hi")
+ self.assertTrue(False, "no exception on write to read-only file")
+        except IOError:
+ pass
+ f.close()
+
+ f = LoopbackFile("w")
+ try:
+ f.read(1)
+            self.assertTrue(False, "no exception on read from write-only file")
+        except IOError:
+ pass
+ f.close()
+
+ def test_readline(self):
+ f = LoopbackFile("r+U")
+ f.write(
+ b"First line.\nSecond line.\r\nThird line.\n"
+ + b"Fourth line.\nFinal line non-terminated."
+ )
+
+ self.assertEqual(f.readline(), "First line.\n")
+ # universal newline mode should convert this linefeed:
+ self.assertEqual(f.readline(), "Second line.\n")
+ # truncated line:
+ self.assertEqual(f.readline(7), "Third l")
+ self.assertEqual(f.readline(), "ine.\n")
+ # newline should be detected and only the fourth line returned
+ self.assertEqual(f.readline(39), "Fourth line.\n")
+ self.assertEqual(f.readline(), "Final line non-terminated.")
+ self.assertEqual(f.readline(), "")
+ f.close()
+ try:
+ f.readline()
+ self.assertTrue(False, "no exception on readline of closed file")
+ except IOError:
+ pass
+ self.assertTrue(linefeed_byte in f.newlines)
+ self.assertTrue(crlf in f.newlines)
+ self.assertTrue(cr_byte not in f.newlines)
+
+ def test_lf(self):
+ """
+ try to trick the linefeed detector.
+ """
+ f = LoopbackFile("r+U")
+ f.write(b"First line.\r")
+ self.assertEqual(f.readline(), "First line.\n")
+ f.write(b"\nSecond.\r\n")
+ self.assertEqual(f.readline(), "Second.\n")
+ f.close()
+ self.assertEqual(f.newlines, crlf)
+
+ def test_write(self):
+ """
+ verify that write buffering is on.
+ """
+ f = LoopbackFile("r+", 1)
+ f.write(b"Complete line.\nIncomplete line.")
+ self.assertEqual(f.readline(), "Complete line.\n")
+ self.assertEqual(f.readline(), "")
+ f.write("..\n")
+ self.assertEqual(f.readline(), "Incomplete line...\n")
+ f.close()
+
+ def test_flush(self):
+ """
+ verify that flush will force a write.
+ """
+ f = LoopbackFile("r+", 512)
+ f.write("Not\nquite\n512 bytes.\n")
+ self.assertEqual(f.read(1), b"")
+ f.flush()
+ self.assertEqual(f.read(6), b"Not\nqu")
+ self.assertEqual(f.read(4), b"ite\n")
+ self.assertEqual(f.read(5), b"512 b")
+ self.assertEqual(f.read(9), b"ytes.\n")
+ self.assertEqual(f.read(3), b"")
+ f.close()
+
+ def test_buffering_flushes(self):
+ """
+ verify that flushing happens automatically on buffer crossing.
+ """
+ f = LoopbackFile("r+", 16)
+ f.write(b"Too small.")
+ self.assertEqual(f.read(4), b"")
+ f.write(b" ")
+ self.assertEqual(f.read(4), b"")
+ f.write(b"Enough.")
+ self.assertEqual(f.read(20), b"Too small. Enough.")
+ f.close()
+
+ def test_read_all(self):
+ """
+ verify that read(-1) returns everything left in the file.
+ """
+ f = LoopbackFile("r+", 16)
+ f.write(b"The first thing you need to do is open your eyes. ")
+ f.write(b"Then, you need to close them again.\n")
+ s = f.read(-1)
+ self.assertEqual(
+ s,
+ b"The first thing you need to do is open your eyes. Then, you "
+ + b"need to close them again.\n",
+ )
+ f.close()
+
+ def test_readable(self):
+ f = LoopbackFile("r")
+ self.assertTrue(f.readable())
+ self.assertFalse(f.writable())
+ self.assertFalse(f.seekable())
+ f.close()
+
+ def test_writable(self):
+ f = LoopbackFile("w")
+ self.assertTrue(f.writable())
+ self.assertFalse(f.readable())
+ self.assertFalse(f.seekable())
+ f.close()
+
+ def test_readinto(self):
+ data = bytearray(5)
+ f = LoopbackFile("r+")
+ f._write(b"hello")
+ f.readinto(data)
+ self.assertEqual(data, b"hello")
+ f.close()
+
+ def test_write_bad_type(self):
+ with LoopbackFile("wb") as f:
+ self.assertRaises(TypeError, f.write, object())
+
+ def test_write_unicode_as_binary(self):
+ text = "\xa7 why is writing text to a binary file allowed?\n"
+ with LoopbackFile("rb+") as f:
+ f.write(text)
+ self.assertEqual(f.read(), text.encode("utf-8"))
+
+ @needs_builtin("memoryview")
+ def test_write_bytearray(self):
+ with LoopbackFile("rb+") as f:
+ f.write(bytearray(12))
+ self.assertEqual(f.read(), 12 * b"\0")
+
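+    # The Python 2-only "buffer" builtin is required here, so needs_builtin
+    # skips this test on interpreters that no longer provide it.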
+ @needs_builtin("buffer")
+ def test_write_buffer(self):
+ data = 3 * b"pretend giant block of data\n"
+ offsets = range(0, len(data), 8)
+ with LoopbackFile("rb+") as f:
+ for offset in offsets:
+ f.write(buffer(data, offset, 8)) # noqa
+ self.assertEqual(f.read(), data)
+
+ @needs_builtin("memoryview")
+ def test_write_memoryview(self):
+ data = 3 * b"pretend giant block of data\n"
+ offsets = range(0, len(data), 8)
+ with LoopbackFile("rb+") as f:
+ view = memoryview(data)
+ for offset in offsets:
+ f.write(view[offset : offset + 8])
+ self.assertEqual(f.read(), data)
+
+
+if __name__ == "__main__":
+ from unittest import main
+
+ main()
diff --git a/tests/test_gssapi.py b/tests/test_gssapi.py
new file mode 100644
index 0000000..da62fd9
--- /dev/null
+++ b/tests/test_gssapi.py
@@ -0,0 +1,225 @@
+# Copyright (C) 2013-2014 science + computing ag
+# Author: Sebastian Deiss <sebastian.deiss@t-online.de>
+#
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Test the used APIs for GSS-API / SSPI authentication
+"""
+
+import socket
+
+from ._util import needs_gssapi, KerberosTestCase, update_env
+
+#
+# NOTE: KerberosTestCase skips all tests if it was unable to import k5test
+# third-party library. That's the primary trigger for whether this module
+# effectively gets run or not. See tests/_util.py for other triggers (a set of
+# env vars a human might have defined).
+#
+
+
+@needs_gssapi
+class GSSAPITest(KerberosTestCase):
+ def setUp(self):
+ super().setUp()
+ # TODO: these vars should all come from os.environ or whatever the
+ # approved pytest method is for runtime-configuring test data.
+ self.krb5_mech = "1.2.840.113554.1.2.2"
+ self.targ_name = self.realm.hostname
+ self.server_mode = False
+ update_env(self, self.realm.env)
+
+ def test_pyasn1(self):
+ """
+ Test the used methods of pyasn1.
+ """
+ from pyasn1.type.univ import ObjectIdentifier
+ from pyasn1.codec.der import encoder, decoder
+
+ oid = encoder.encode(ObjectIdentifier(self.krb5_mech))
+ mech, __ = decoder.decode(oid)
+        self.assertEqual(self.krb5_mech, str(mech))
+
+ def _gssapi_sspi_test(self):
+ """
+ Test the used methods of python-gssapi or sspi, sspicon from pywin32.
+ """
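+        # Figure out which implementation is importable: the legacy
+        # "python-gssapi" package (which exposes __title__), the modern gssapi
+        # package, or pywin32's sspi/sspicon modules on Windows.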
+ try:
+ import gssapi
+
+ if (
+ hasattr(gssapi, "__title__")
+ and gssapi.__title__ == "python-gssapi"
+ ):
+ _API = "PYTHON-GSSAPI-OLD"
+ else:
+ _API = "PYTHON-GSSAPI-NEW"
+ except ImportError:
+ import sspicon
+ import sspi
+
+ _API = "SSPI"
+
+ c_token = None
+ gss_ctxt_status = False
+ mic_msg = b"G'day Mate!"
+
+ if _API == "PYTHON-GSSAPI-OLD":
+ if self.server_mode:
+ gss_flags = (
+ gssapi.C_PROT_READY_FLAG,
+ gssapi.C_INTEG_FLAG,
+ gssapi.C_MUTUAL_FLAG,
+ gssapi.C_DELEG_FLAG,
+ )
+ else:
+ gss_flags = (
+ gssapi.C_PROT_READY_FLAG,
+ gssapi.C_INTEG_FLAG,
+ gssapi.C_DELEG_FLAG,
+ )
+ # Initialize a GSS-API context.
+ ctx = gssapi.Context()
+ ctx.flags = gss_flags
+ krb5_oid = gssapi.OID.mech_from_string(self.krb5_mech)
+ target_name = gssapi.Name(
+ "host@" + self.targ_name, gssapi.C_NT_HOSTBASED_SERVICE
+ )
+ gss_ctxt = gssapi.InitContext(
+ peer_name=target_name, mech_type=krb5_oid, req_flags=ctx.flags
+ )
+ if self.server_mode:
+ c_token = gss_ctxt.step(c_token)
+ gss_ctxt_status = gss_ctxt.established
+                self.assertEqual(False, gss_ctxt_status)
+ # Accept a GSS-API context.
+ gss_srv_ctxt = gssapi.AcceptContext()
+ s_token = gss_srv_ctxt.step(c_token)
+ gss_ctxt_status = gss_srv_ctxt.established
+                self.assertNotEqual(None, s_token)
+                self.assertEqual(True, gss_ctxt_status)
+ # Establish the client context
+ c_token = gss_ctxt.step(s_token)
+                self.assertEqual(None, c_token)
+ else:
+ while not gss_ctxt.established:
+ c_token = gss_ctxt.step(c_token)
+                    self.assertNotEqual(None, c_token)
+ # Build MIC
+ mic_token = gss_ctxt.get_mic(mic_msg)
+
+ if self.server_mode:
+ # Check MIC
+ status = gss_srv_ctxt.verify_mic(mic_msg, mic_token)
+                self.assertEqual(0, status)
+ elif _API == "PYTHON-GSSAPI-NEW":
+ if self.server_mode:
+ gss_flags = (
+ gssapi.RequirementFlag.protection_ready,
+ gssapi.RequirementFlag.integrity,
+ gssapi.RequirementFlag.mutual_authentication,
+ gssapi.RequirementFlag.delegate_to_peer,
+ )
+ else:
+ gss_flags = (
+ gssapi.RequirementFlag.protection_ready,
+ gssapi.RequirementFlag.integrity,
+ gssapi.RequirementFlag.delegate_to_peer,
+ )
+ # Initialize a GSS-API context.
+ krb5_oid = gssapi.MechType.kerberos
+ target_name = gssapi.Name(
+ "host@" + self.targ_name,
+ name_type=gssapi.NameType.hostbased_service,
+ )
+ gss_ctxt = gssapi.SecurityContext(
+ name=target_name,
+ flags=gss_flags,
+ mech=krb5_oid,
+ usage="initiate",
+ )
+ if self.server_mode:
+ c_token = gss_ctxt.step(c_token)
+ gss_ctxt_status = gss_ctxt.complete
+                self.assertEqual(False, gss_ctxt_status)
+ # Accept a GSS-API context.
+ gss_srv_ctxt = gssapi.SecurityContext(usage="accept")
+ s_token = gss_srv_ctxt.step(c_token)
+ gss_ctxt_status = gss_srv_ctxt.complete
+                self.assertNotEqual(None, s_token)
+                self.assertEqual(True, gss_ctxt_status)
+ # Establish the client context
+ c_token = gss_ctxt.step(s_token)
+                self.assertEqual(None, c_token)
+ else:
+ while not gss_ctxt.complete:
+ c_token = gss_ctxt.step(c_token)
+                    self.assertNotEqual(None, c_token)
+ # Build MIC
+ mic_token = gss_ctxt.get_signature(mic_msg)
+
+ if self.server_mode:
+ # Check MIC
+ status = gss_srv_ctxt.verify_signature(mic_msg, mic_token)
+                self.assertEqual(0, status)
+ else:
+ gss_flags = (
+ sspicon.ISC_REQ_INTEGRITY
+ | sspicon.ISC_REQ_MUTUAL_AUTH
+ | sspicon.ISC_REQ_DELEGATE
+ )
+ # Initialize a GSS-API context.
+ target_name = "host/" + socket.getfqdn(self.targ_name)
+ gss_ctxt = sspi.ClientAuth(
+ "Kerberos", scflags=gss_flags, targetspn=target_name
+ )
+ if self.server_mode:
+ error, token = gss_ctxt.authorize(c_token)
+ c_token = token[0].Buffer
+                self.assertEqual(0, error)
+ # Accept a GSS-API context.
+ gss_srv_ctxt = sspi.ServerAuth("Kerberos", spn=target_name)
+ error, token = gss_srv_ctxt.authorize(c_token)
+ s_token = token[0].Buffer
+ # Establish the context.
+ error, token = gss_ctxt.authorize(s_token)
+ c_token = token[0].Buffer
+                self.assertEqual(None, c_token)
+                self.assertEqual(0, error)
+ # Build MIC
+ mic_token = gss_ctxt.sign(mic_msg)
+ # Check MIC
+ gss_srv_ctxt.verify(mic_msg, mic_token)
+ else:
+ error, token = gss_ctxt.authorize(c_token)
+ c_token = token[0].Buffer
+                self.assertNotEqual(0, error)
+
+ def test_gssapi_sspi_client(self):
+ """
+ Test the used methods of python-gssapi or sspi, sspicon from pywin32.
+ """
+ self._gssapi_sspi_test()
+
+ def test_gssapi_sspi_server(self):
+ """
+ Test the used methods of python-gssapi or sspi, sspicon from pywin32.
+ """
+ self.server_mode = True
+ self._gssapi_sspi_test()
diff --git a/tests/test_hostkeys.py b/tests/test_hostkeys.py
new file mode 100644
index 0000000..a028411
--- /dev/null
+++ b/tests/test_hostkeys.py
@@ -0,0 +1,172 @@
+# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Some unit tests for HostKeys.
+"""
+
+from base64 import decodebytes
+from binascii import hexlify
+import os
+import unittest
+
+import paramiko
+
+
+test_hosts_file = """\
+secure.example.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA1PD6U2/TVxET6lkpKhOk5r\
+9q/kAYG6sP9f5zuUYP8i7FOFp/6ncCEbbtg/lB+A3iidyxoSWl+9jtoyyDOOVX4UIDV9G11Ml8om3\
+D+jrpI9cycZHqilK0HmxDeCuxbwyMuaCygU9gS2qoRvNLWZk70OpIKSSpBo0Wl3/XUmz9uhc=
+broken.example.com ssh-rsa AAAA
+happy.example.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA8bP1ZA7DCZDB9J0s50l31M\
+BGQ3GQ/Fc7SX6gkpXkwcZryoi4kNFhHu5LvHcZPdxXV1D+uTMfGS1eyd2Yz/DoNWXNAl8TI0cAsW\
+5ymME3bQ4J/k1IKxCtz/bAlAqFgKoc+EolMziDYqWIATtW0rYTJvzGAzTmMj80/QpsFH+Pc2M=
+modern.example.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKHEChAIxsh2hr8Q\
++Ea1AAHZyfEB2elEc2YgduVzBtp+
+curvy.example.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlz\
+dHAyNTYAAABBBAa+pY7djSpbg5viAcZhPt56AO3U3Sd7h7dnlUp0EjfDgyYHYQxl2QZ4JGgfwR5iv9\
+T9iRZjQzvJd5s+kBAZtpk=
+"""
+
+test_hosts_file_tabs = """\
+secure.example.com\tssh-rsa\tAAAAB3NzaC1yc2EAAAABIwAAAIEA1PD6U2/TVxET6lkpKhOk5r\
+9q/kAYG6sP9f5zuUYP8i7FOFp/6ncCEbbtg/lB+A3iidyxoSWl+9jtoyyDOOVX4UIDV9G11Ml8om3\
+D+jrpI9cycZHqilK0HmxDeCuxbwyMuaCygU9gS2qoRvNLWZk70OpIKSSpBo0Wl3/XUmz9uhc=
+happy.example.com\tssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA8bP1ZA7DCZDB9J0s50l31M\
+BGQ3GQ/Fc7SX6gkpXkwcZryoi4kNFhHu5LvHcZPdxXV1D+uTMfGS1eyd2Yz/DoNWXNAl8TI0cAsW\
+5ymME3bQ4J/k1IKxCtz/bAlAqFgKoc+EolMziDYqWIATtW0rYTJvzGAzTmMj80/QpsFH+Pc2M=
+modern.example.com\tssh-ed25519\tAAAAC3NzaC1lZDI1NTE5AAAAIKHEChAIxsh2hr8Q\
++Ea1AAHZyfEB2elEc2YgduVzBtp+
+curvy.example.com\tecdsa-sha2-nistp256\tAAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbml\
+zdHAyNTYAAABBBAa+pY7djSpbg5viAcZhPt56AO3U3Sd7h7dnlUp0EjfDgyYHYQxl2QZ4JGgfwR5iv\
+9T9iRZjQzvJd5s+kBAZtpk=
+"""
+
+keyblob = b"""\
+AAAAB3NzaC1yc2EAAAABIwAAAIEA8bP1ZA7DCZDB9J0s50l31MBGQ3GQ/Fc7SX6gkpXkwcZryoi4k\
+NFhHu5LvHcZPdxXV1D+uTMfGS1eyd2Yz/DoNWXNAl8TI0cAsW5ymME3bQ4J/k1IKxCtz/bAlAqFgK\
+oc+EolMziDYqWIATtW0rYTJvzGAzTmMj80/QpsFH+Pc2M="""
+
+keyblob_dss = b"""\
+AAAAB3NzaC1kc3MAAACBAOeBpgNnfRzr/twmAQRu2XwWAp3CFtrVnug6s6fgwj/oLjYbVtjAy6pl/\
+h0EKCWx2rf1IetyNsTxWrniA9I6HeDj65X1FyDkg6g8tvCnaNB8Xp/UUhuzHuGsMIipRxBxw9LF60\
+8EqZcj1E3ytktoW5B5OcjrkEoz3xG7C+rpIjYvAAAAFQDwz4UnmsGiSNu5iqjn3uTzwUpshwAAAIE\
+AkxfFeY8P2wZpDjX0MimZl5wkoFQDL25cPzGBuB4OnB8NoUk/yjAHIIpEShw8V+LzouMK5CTJQo5+\
+Ngw3qIch/WgRmMHy4kBq1SsXMjQCte1So6HBMvBPIW5SiMTmjCfZZiw4AYHK+B/JaOwaG9yRg2Ejg\
+4Ok10+XFDxlqZo8Y+wAAACARmR7CCPjodxASvRbIyzaVpZoJ/Z6x7dAumV+ysrV1BVYd0lYukmnjO\
+1kKBWApqpH1ve9XDQYN8zgxM4b16L21kpoWQnZtXrY3GZ4/it9kUgyB7+NwacIBlXa8cMDL7Q/69o\
+0d54U0X/NeX5QxuYR6OMJlrkQB7oiW/P/1mwjQgE="""
+
+
+class HostKeysTest(unittest.TestCase):
+ def setUp(self):
+ with open("hostfile.temp", "w") as f:
+ f.write(test_hosts_file)
+
+ def tearDown(self):
+ os.unlink("hostfile.temp")
+
+ def test_load(self):
+ hostdict = paramiko.HostKeys("hostfile.temp")
+ assert len(hostdict) == 4
+ self.assertEqual(1, len(list(hostdict.values())[0]))
+ self.assertEqual(1, len(list(hostdict.values())[1]))
+ fp = hexlify(
+ hostdict["secure.example.com"]["ssh-rsa"].get_fingerprint()
+ ).upper()
+ self.assertEqual(b"E6684DB30E109B67B70FF1DC5C7F1363", fp)
+
+ def test_add(self):
+ hostdict = paramiko.HostKeys("hostfile.temp")
+ hh = "|1|BMsIC6cUIP2zBuXR3t2LRcJYjzM=|hpkJMysjTk/+zzUUzxQEa2ieq6c="
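+        # "|1|salt|hash" is the hashed known_hosts form of a hostname; entries
+        # like this can be generated with paramiko.HostKeys.hash_host().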
+ key = paramiko.RSAKey(data=decodebytes(keyblob))
+ hostdict.add(hh, "ssh-rsa", key)
+ assert len(hostdict) == 5
+ x = hostdict["foo.example.com"]
+ fp = hexlify(x["ssh-rsa"].get_fingerprint()).upper()
+ self.assertEqual(b"7EC91BB336CB6D810B124B1353C32396", fp)
+ self.assertTrue(hostdict.check("foo.example.com", key))
+
+ def test_dict(self):
+ hostdict = paramiko.HostKeys("hostfile.temp")
+ self.assertTrue("secure.example.com" in hostdict)
+ self.assertTrue("not.example.com" not in hostdict)
+ self.assertTrue("secure.example.com" in hostdict)
+ self.assertTrue("not.example.com" not in hostdict)
+ x = hostdict.get("secure.example.com", None)
+ self.assertTrue(x is not None)
+ fp = hexlify(x["ssh-rsa"].get_fingerprint()).upper()
+ self.assertEqual(b"E6684DB30E109B67B70FF1DC5C7F1363", fp)
+ assert list(hostdict) == hostdict.keys()
+ assert len(list(hostdict)) == len(hostdict.keys()) == 4
+
+ def test_dict_set(self):
+ hostdict = paramiko.HostKeys("hostfile.temp")
+ key = paramiko.RSAKey(data=decodebytes(keyblob))
+ key_dss = paramiko.DSSKey(data=decodebytes(keyblob_dss))
+ hostdict["secure.example.com"] = {"ssh-rsa": key, "ssh-dss": key_dss}
+ hostdict["fake.example.com"] = {}
+ hostdict["fake.example.com"]["ssh-rsa"] = key
+
+ assert len(hostdict) == 5
+ self.assertEqual(2, len(list(hostdict.values())[0]))
+ self.assertEqual(1, len(list(hostdict.values())[1]))
+ self.assertEqual(1, len(list(hostdict.values())[2]))
+ fp = hexlify(
+ hostdict["secure.example.com"]["ssh-rsa"].get_fingerprint()
+ ).upper()
+ self.assertEqual(b"7EC91BB336CB6D810B124B1353C32396", fp)
+ fp = hexlify(
+ hostdict["secure.example.com"]["ssh-dss"].get_fingerprint()
+ ).upper()
+ self.assertEqual(b"4478F0B9A23CC5182009FF755BC1D26C", fp)
+
+ def test_delitem(self):
+ hostdict = paramiko.HostKeys("hostfile.temp")
+ target = "happy.example.com"
+ hostdict[target] # will KeyError if not present
+ del hostdict[target]
+ try:
+ hostdict[target]
+ except KeyError:
+ pass # Good
+ else:
+ assert False, "Entry was not deleted from HostKeys on delitem!"
+
+ def test_entry_delitem(self):
+ hostdict = paramiko.HostKeys("hostfile.temp")
+ target = "happy.example.com"
+ entry = hostdict[target]
+ key_type_list = [key_type for key_type in entry]
+ for key_type in key_type_list:
+ del entry[key_type]
+
+ # will KeyError if not present
+ for key_type in key_type_list:
+ try:
+ del entry[key_type]
+ except KeyError:
+ pass # Good
+ else:
+ assert False, "Key was not deleted from Entry on delitem!"
+
+
+class HostKeysTabsTest(HostKeysTest):
+ def setUp(self):
+ with open("hostfile.temp", "w") as f:
+ f.write(test_hosts_file_tabs)
diff --git a/tests/test_kex.py b/tests/test_kex.py
new file mode 100644
index 0000000..c3bf2b0
--- /dev/null
+++ b/tests/test_kex.py
@@ -0,0 +1,668 @@
+# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Some unit tests for the key exchange protocols.
+"""
+
+from binascii import hexlify, unhexlify
+import os
+import unittest
+
+from unittest.mock import Mock, patch
+import pytest
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.asymmetric import ec
+
+try:
+ from cryptography.hazmat.primitives.asymmetric import x25519
+except ImportError:
+ x25519 = None
+
+import paramiko.util
+from paramiko.kex_group1 import KexGroup1
+from paramiko.kex_group14 import KexGroup14SHA256
+from paramiko.kex_gex import KexGex, KexGexSHA256
+from paramiko import Message
+from paramiko.common import byte_chr
+from paramiko.kex_ecdh_nist import KexNistp256
+from paramiko.kex_group16 import KexGroup16SHA512
+from paramiko.kex_curve25519 import KexCurve25519
+
+
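+# These stand-ins pin every source of randomness (os.urandom and the NIST
+# P-256 key pair) to fixed values, which is what makes the expected K/H and
+# message constants asserted by KexTest deterministic.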
+def dummy_urandom(n):
+ return byte_chr(0xCC) * n
+
+
+def dummy_generate_key_pair(obj):
+ private_key_value = 94761803665136558137557783047955027733968423115106677159790289642479432803037 # noqa
+ public_key_numbers = "042bdab212fa8ba1b7c843301682a4db424d307246c7e1e6083c41d9ca7b098bf30b3d63e2ec6278488c135360456cc054b3444ecc45998c08894cbc1370f5f989" # noqa
+ public_key_numbers_obj = ec.EllipticCurvePublicKey.from_encoded_point(
+ ec.SECP256R1(), unhexlify(public_key_numbers)
+ ).public_numbers()
+ obj.P = ec.EllipticCurvePrivateNumbers(
+ private_value=private_key_value, public_numbers=public_key_numbers_obj
+ ).private_key(default_backend())
+ if obj.transport.server_mode:
+ obj.Q_S = ec.EllipticCurvePublicKey.from_encoded_point(
+ ec.SECP256R1(), unhexlify(public_key_numbers)
+ )
+ return
+ obj.Q_C = ec.EllipticCurvePublicKey.from_encoded_point(
+ ec.SECP256R1(), unhexlify(public_key_numbers)
+ )
+
+
+class FakeKey:
+ def __str__(self):
+ return "fake-key"
+
+ def asbytes(self):
+ return b"fake-key"
+
+ def sign_ssh_data(self, H, algorithm):
+ return b"fake-sig"
+
+
+class FakeModulusPack:
+ P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF # noqa
+ G = 2
+
+ def get_modulus(self, min, ask, max):
+ return self.G, self.P
+
+
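+# FakeTransport records whatever the kex handlers hand it (the outgoing
+# message, expected packet types, negotiated K/H, and host-key verification
+# arguments) so the tests below can assert on those values directly.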
+class FakeTransport:
+ local_version = "SSH-2.0-paramiko_1.0"
+ remote_version = "SSH-2.0-lame"
+ local_kex_init = "local-kex-init"
+ remote_kex_init = "remote-kex-init"
+ host_key_type = "fake-key"
+
+ def _send_message(self, m):
+ self._message = m
+
+ def _expect_packet(self, *t):
+ self._expect = t
+
+ def _set_K_H(self, K, H):
+ self._K = K
+ self._H = H
+
+ def _verify_key(self, host_key, sig):
+ self._verify = (host_key, sig)
+
+ def _activate_outbound(self):
+ self._activated = True
+
+ def _log(self, level, s):
+ pass
+
+ def get_server_key(self):
+ return FakeKey()
+
+ def _get_modulus_pack(self):
+ return FakeModulusPack()
+
+
+class KexTest(unittest.TestCase):
+
+ K = 14730343317708716439807310032871972459448364195094179797249681733965528989482751523943515690110179031004049109375612685505881911274101441415545039654102474376472240501616988799699744135291070488314748284283496055223852115360852283821334858541043710301057312858051901453919067023103730011648890038847384890504 # noqa
+
+ def setUp(self):
+ self._original_urandom = os.urandom
+ os.urandom = dummy_urandom
+ self._original_generate_key_pair = KexNistp256._generate_key_pair
+ KexNistp256._generate_key_pair = dummy_generate_key_pair
+
+ if KexCurve25519.is_available():
+ static_x25519_key = x25519.X25519PrivateKey.from_private_bytes(
+ unhexlify(
+ b"2184abc7eb3e656d2349d2470ee695b570c227340c2b2863b6c9ff427af1f040" # noqa
+ )
+ )
+ mock_x25519 = Mock()
+ mock_x25519.generate.return_value = static_x25519_key
+ patcher = patch(
+ "paramiko.kex_curve25519.X25519PrivateKey", mock_x25519
+ )
+ patcher.start()
+ self.x25519_patcher = patcher
+
+ def tearDown(self):
+ os.urandom = self._original_urandom
+ KexNistp256._generate_key_pair = self._original_generate_key_pair
+ if hasattr(self, "x25519_patcher"):
+ self.x25519_patcher.stop()
+
+ def test_group1_client(self):
+ transport = FakeTransport()
+ transport.server_mode = False
+ kex = KexGroup1(transport)
+ kex.start_kex()
+ x = b"1E000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" # noqa
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_group1._MSG_KEXDH_REPLY,), transport._expect
+ )
+
+ # fake "reply"
+ msg = Message()
+ msg.add_string("fake-host-key")
+ msg.add_mpint(69)
+ msg.add_string("fake-sig")
+ msg.rewind()
+ kex.parse_next(paramiko.kex_group1._MSG_KEXDH_REPLY, msg)
+ H = b"03079780F3D3AD0B3C6DB30C8D21685F367A86D2"
+ self.assertEqual(self.K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify)
+ self.assertTrue(transport._activated)
+
+ def test_group1_server(self):
+ transport = FakeTransport()
+ transport.server_mode = True
+ kex = KexGroup1(transport)
+ kex.start_kex()
+ self.assertEqual(
+ (paramiko.kex_group1._MSG_KEXDH_INIT,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_mpint(69)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_group1._MSG_KEXDH_INIT, msg)
+ H = b"B16BF34DD10945EDE84E9C1EF24A14BFDC843389"
+ x = b"1F0000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" # noqa
+ self.assertEqual(self.K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertTrue(transport._activated)
+
+ def test_gex_client(self):
+ transport = FakeTransport()
+ transport.server_mode = False
+ kex = KexGex(transport)
+ kex.start_kex()
+ x = b"22000004000000080000002000"
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_mpint(FakeModulusPack.P)
+ msg.add_mpint(FakeModulusPack.G)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg)
+ x = b"20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" # noqa
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_string("fake-host-key")
+ msg.add_mpint(69)
+ msg.add_string("fake-sig")
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg)
+ H = b"A265563F2FA87F1A89BF007EE90D58BE2E4A4BD0"
+ self.assertEqual(self.K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify)
+ self.assertTrue(transport._activated)
+
+ def test_gex_old_client(self):
+ transport = FakeTransport()
+ transport.server_mode = False
+ kex = KexGex(transport)
+ kex.start_kex(_test_old_style=True)
+ x = b"1E00000800"
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_mpint(FakeModulusPack.P)
+ msg.add_mpint(FakeModulusPack.G)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg)
+ x = b"20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" # noqa
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_string("fake-host-key")
+ msg.add_mpint(69)
+ msg.add_string("fake-sig")
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg)
+ H = b"807F87B269EF7AC5EC7E75676808776A27D5864C"
+ self.assertEqual(self.K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify)
+ self.assertTrue(transport._activated)
+
+ def test_gex_server(self):
+ transport = FakeTransport()
+ transport.server_mode = True
+ kex = KexGex(transport)
+ kex.start_kex()
+ self.assertEqual(
+ (
+ paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST,
+ paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD,
+ ),
+ transport._expect,
+ )
+
+ msg = Message()
+ msg.add_int(1024)
+ msg.add_int(2048)
+ msg.add_int(4096)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, msg)
+ x = b"1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102" # noqa
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_mpint(12345)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg)
+ K = 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 # noqa
+ H = b"CE754197C21BF3452863B4F44D0B3951F12516EF"
+ x = b"210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" # noqa
+ self.assertEqual(K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertTrue(transport._activated)
+
+ def test_gex_server_with_old_client(self):
+ transport = FakeTransport()
+ transport.server_mode = True
+ kex = KexGex(transport)
+ kex.start_kex()
+ self.assertEqual(
+ (
+ paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST,
+ paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD,
+ ),
+ transport._expect,
+ )
+
+ msg = Message()
+ msg.add_int(2048)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, msg)
+ x = b"1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102" # noqa
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_mpint(12345)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg)
+ K = 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 # noqa
+ H = b"B41A06B2E59043CEFC1AE16EC31F1E2D12EC455B"
+ x = b"210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" # noqa
+ self.assertEqual(K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertTrue(transport._activated)
+
+ def test_gex_sha256_client(self):
+ transport = FakeTransport()
+ transport.server_mode = False
+ kex = KexGexSHA256(transport)
+ kex.start_kex()
+ x = b"22000004000000080000002000"
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_mpint(FakeModulusPack.P)
+ msg.add_mpint(FakeModulusPack.G)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg)
+ x = b"20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" # noqa
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_string("fake-host-key")
+ msg.add_mpint(69)
+ msg.add_string("fake-sig")
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg)
+ H = b"AD1A9365A67B4496F05594AD1BF656E3CDA0851289A4C1AFF549FEAE50896DF4"
+ self.assertEqual(self.K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify)
+ self.assertTrue(transport._activated)
+
+ def test_gex_sha256_old_client(self):
+ transport = FakeTransport()
+ transport.server_mode = False
+ kex = KexGexSHA256(transport)
+ kex.start_kex(_test_old_style=True)
+ x = b"1E00000800"
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_mpint(FakeModulusPack.P)
+ msg.add_mpint(FakeModulusPack.G)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg)
+ x = b"20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4" # noqa
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_string("fake-host-key")
+ msg.add_mpint(69)
+ msg.add_string("fake-sig")
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg)
+ H = b"518386608B15891AE5237DEE08DCADDE76A0BCEFCE7F6DB3AD66BC41D256DFE5"
+ self.assertEqual(self.K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify)
+ self.assertTrue(transport._activated)
+
+ def test_gex_sha256_server(self):
+ transport = FakeTransport()
+ transport.server_mode = True
+ kex = KexGexSHA256(transport)
+ kex.start_kex()
+ self.assertEqual(
+ (
+ paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST,
+ paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD,
+ ),
+ transport._expect,
+ )
+
+ msg = Message()
+ msg.add_int(1024)
+ msg.add_int(2048)
+ msg.add_int(4096)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, msg)
+ x = b"1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102" # noqa
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_mpint(12345)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg)
+ K = 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 # noqa
+ H = b"CCAC0497CF0ABA1DBF55E1A3995D17F4CC31824B0E8D95CDF8A06F169D050D80"
+ x = b"210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" # noqa
+ self.assertEqual(K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertTrue(transport._activated)
+
+ def test_gex_sha256_server_with_old_client(self):
+ transport = FakeTransport()
+ transport.server_mode = True
+ kex = KexGexSHA256(transport)
+ kex.start_kex()
+ self.assertEqual(
+ (
+ paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST,
+ paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD,
+ ),
+ transport._expect,
+ )
+
+ msg = Message()
+ msg.add_int(2048)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, msg)
+ x = b"1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102" # noqa
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_mpint(12345)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg)
+ K = 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 # noqa
+ H = b"3DDD2AD840AD095E397BA4D0573972DC60F6461FD38A187CACA6615A5BC8ADBB"
+ x = b"210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967" # noqa
+ self.assertEqual(K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertTrue(transport._activated)
+
+ def test_kex_nistp256_client(self):
+ K = 91610929826364598472338906427792435253694642563583721654249504912114314269754 # noqa
+ transport = FakeTransport()
+ transport.server_mode = False
+ kex = KexNistp256(transport)
+ kex.start_kex()
+ self.assertEqual(
+ (paramiko.kex_ecdh_nist._MSG_KEXECDH_REPLY,), transport._expect
+ )
+
+ # fake reply
+ msg = Message()
+ msg.add_string("fake-host-key")
+ Q_S = unhexlify(
+ "043ae159594ba062efa121480e9ef136203fa9ec6b6e1f8723a321c16e62b945f573f3b822258cbcd094b9fa1c125cbfe5f043280893e66863cc0cb4dccbe70210" # noqa
+ )
+ msg.add_string(Q_S)
+ msg.add_string("fake-sig")
+ msg.rewind()
+ kex.parse_next(paramiko.kex_ecdh_nist._MSG_KEXECDH_REPLY, msg)
+ H = b"BAF7CE243A836037EB5D2221420F35C02B9AB6C957FE3BDE3369307B9612570A"
+ self.assertEqual(K, kex.transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify)
+ self.assertTrue(transport._activated)
+
+ def test_kex_nistp256_server(self):
+ K = 91610929826364598472338906427792435253694642563583721654249504912114314269754 # noqa
+ transport = FakeTransport()
+ transport.server_mode = True
+ kex = KexNistp256(transport)
+ kex.start_kex()
+ self.assertEqual(
+ (paramiko.kex_ecdh_nist._MSG_KEXECDH_INIT,), transport._expect
+ )
+
+ # fake init
+ msg = Message()
+ Q_C = unhexlify(
+ "043ae159594ba062efa121480e9ef136203fa9ec6b6e1f8723a321c16e62b945f573f3b822258cbcd094b9fa1c125cbfe5f043280893e66863cc0cb4dccbe70210" # noqa
+ )
+ H = b"2EF4957AFD530DD3F05DBEABF68D724FACC060974DA9704F2AEE4C3DE861E7CA"
+ msg.add_string(Q_C)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_ecdh_nist._MSG_KEXECDH_INIT, msg)
+ self.assertEqual(K, transport._K)
+ self.assertTrue(transport._activated)
+ self.assertEqual(H, hexlify(transport._H).upper())
+
+ def test_kex_group14_sha256_client(self):
+ transport = FakeTransport()
+ transport.server_mode = False
+ kex = KexGroup14SHA256(transport)
+ kex.start_kex()
+ x = b"1E00000101009850B3A8DE3ECCD3F19644139137C93D9C11BC28ED8BE850908EE294E1D43B88B9295311EFAEF5B736A1B652EBE184CCF36CFB0681C1ED66430088FA448B83619F928E7B9592ED6160EC11D639D51C303603F930F743C646B1B67DA38A1D44598DCE6C3F3019422B898044141420E9A10C29B9C58668F7F20A40F154B2C4768FCF7A9AA7179FB6366A7167EE26DD58963E8B880A0572F641DE0A73DC74C930F7C3A0C9388553F3F8403E40CF8B95FEDB1D366596FCF3FDDEB21A0005ADA650EF1733628D807BE5ACB83925462765D9076570056E39994FB328E3108FE406275758D6BF5F32790EF15D8416BF5548164859E785DB45E7787BB0E727ADE08641ED" # noqa
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_group1._MSG_KEXDH_REPLY,), transport._expect
+ )
+
+ # fake "reply"
+ msg = Message()
+ msg.add_string("fake-host-key")
+ msg.add_mpint(69)
+ msg.add_string("fake-sig")
+ msg.rewind()
+ kex.parse_next(paramiko.kex_group1._MSG_KEXDH_REPLY, msg)
+ K = 21526936926159575624241589599003964979640840086252478029709904308461709651400109485351462666820496096345766733042945918306284902585618061272525323382142547359684512114160415969631877620660064043178086464811345023251493620331559440565662862858765724251890489795332144543057725932216208403143759943169004775947331771556537814494448612329251887435553890674764339328444948425882382475260315505741818518926349729970262019325118040559191290279100613049085709127598666890434114956464502529053036826173452792849566280474995114751780998069614898221773345705289637708545219204637224261997310181473787577166103031529148842107599 # noqa
+ H = b"D007C23686BE8A7737F828DC9E899F8EB5AF423F495F138437BE2529C1B8455F"
+ self.assertEqual(K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify)
+ self.assertTrue(transport._activated)
+
+ def test_kex_group14_sha256_server(self):
+ transport = FakeTransport()
+ transport.server_mode = True
+ kex = KexGroup14SHA256(transport)
+ kex.start_kex()
+ self.assertEqual(
+ (paramiko.kex_group1._MSG_KEXDH_INIT,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_mpint(69)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_group1._MSG_KEXDH_INIT, msg)
+ K = 21526936926159575624241589599003964979640840086252478029709904308461709651400109485351462666820496096345766733042945918306284902585618061272525323382142547359684512114160415969631877620660064043178086464811345023251493620331559440565662862858765724251890489795332144543057725932216208403143759943169004775947331771556537814494448612329251887435553890674764339328444948425882382475260315505741818518926349729970262019325118040559191290279100613049085709127598666890434114956464502529053036826173452792849566280474995114751780998069614898221773345705289637708545219204637224261997310181473787577166103031529148842107599 # noqa
+ H = b"15080A19894D489ACD0DA724480E1B08E71293E07EBC25FAD10F263C00B343DC"
+ x = b"1F0000000866616B652D6B657900000101009850B3A8DE3ECCD3F19644139137C93D9C11BC28ED8BE850908EE294E1D43B88B9295311EFAEF5B736A1B652EBE184CCF36CFB0681C1ED66430088FA448B83619F928E7B9592ED6160EC11D639D51C303603F930F743C646B1B67DA38A1D44598DCE6C3F3019422B898044141420E9A10C29B9C58668F7F20A40F154B2C4768FCF7A9AA7179FB6366A7167EE26DD58963E8B880A0572F641DE0A73DC74C930F7C3A0C9388553F3F8403E40CF8B95FEDB1D366596FCF3FDDEB21A0005ADA650EF1733628D807BE5ACB83925462765D9076570056E39994FB328E3108FE406275758D6BF5F32790EF15D8416BF5548164859E785DB45E7787BB0E727ADE08641ED0000000866616B652D736967" # noqa
+ self.assertEqual(K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertTrue(transport._activated)
+
+ def test_kex_group16_sha512_client(self):
+ transport = FakeTransport()
+ transport.server_mode = False
+ kex = KexGroup16SHA512(transport)
+ kex.start_kex()
+ x = b"1E0000020100859FF55A23E0F66463561DD8BFC4764C69C05F85665B06EC9E29EF5003A53A8FA890B6A6EB624DEB55A4FB279DE7010A53580A126817E3D235B05A1081662B1500961D0625F0AAD287F1B597CBA9DB9550D9CC26355C4C59F92E613B5C21AC191F152C09A5DB46DCBA5EA58E3CA6A8B0EB7183E27FAC10106022E8521FA91240FB389060F1E1E4A355049D29DCC82921CE6588791743E4B1DEEE0166F7CC5180C3C75F3773342DF95C8C10AAA5D12975257027936B99B3DED6E6E98CF27EADEAEAE04E7F0A28071F578646B985FCE28A59CEB36287CB65759BE0544D4C4018CDF03C9078FE9CA79ECA611CB6966899E6FD29BE0781491C659FE2380E0D99D50D9CFAAB94E61BE311779719C4C43C6D223AD3799C3915A9E55076A21152DBBF911D6594296D6ECDC1B6FA71997CD29DF987B80FCA7F36BB7F19863C72BBBF839746AFBF9A5B407D468C976AA3E36FA118D3EAAD2E08BF6AE219F81F2CE2BE946337F06CC09BBFABE938A4087E413921CBEC1965ED905999B83396ECA226110CDF6EFB80F815F6489AF87561DA3857F13A7705921306D94176231FBB336B17C3724BC17A28BECB910093AB040873D5D760E8C182B88ECCE3E38DDA68CE35BD152DF7550BD908791FCCEDD1FFDF5ED2A57FFAE79599E487A7726D8A3D950B1729A08FBB60EE462A6BBE8BF0F5F0E1358129A37840FE5B3EEB8BF26E99FA222EAE83" # noqa
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertEqual(
+ (paramiko.kex_group1._MSG_KEXDH_REPLY,), transport._expect
+ )
+
+ # fake "reply"
+ msg = Message()
+ msg.add_string("fake-host-key")
+ msg.add_mpint(69)
+ msg.add_string("fake-sig")
+ msg.rewind()
+ kex.parse_next(paramiko.kex_group1._MSG_KEXDH_REPLY, msg)
+ K = 933242830095376162107925500057692534838883186615567574891154103836907630698358649443101764908667358576734565553213003142941996368306996312915844839972197961603283544950658467545799914435739152351344917376359963584614213874232577733869049670230112638724993540996854599166318001059065780674008011575015459772051180901213815080343343801745386220342919837913506966863570473712948197760657442974564354432738520446202131551650771882909329069340612274196233658123593466135642819578182367229641847749149740891990379052266213711500434128970973602206842980669193719602075489724202241641553472106310932258574377789863734311328542715212248147206865762697424822447603031087553480483833829498375309975229907460562402877655519980113688369262871485777790149373908739910846630414678346163764464587129010141922982925829457954376352735653834300282864445132624993186496129911208133529828461690634463092007726349795944930302881758403402084584307180896465875803621285362317770276493727205689466142632599776710824902573926951951209239626732358074877997756011804454926541386215567756538832824717436605031489511654178384081883801272314328403020205577714999460724519735573055540814037716770051316113795603990199374791348798218428912977728347485489266146775472 # noqa
+ H = b"F6E2BCC846B9B62591EFB86663D55D4769CA06B2EDABE469DF831639B2DDD5A271985011900A724CB2C87F19F347B3632A7C1536AF3D12EE463E6EA75281AF0C" # noqa
+ self.assertEqual(K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify)
+ self.assertTrue(transport._activated)
+
+ def test_kex_group16_sha512_server(self):
+ transport = FakeTransport()
+ transport.server_mode = True
+ kex = KexGroup16SHA512(transport)
+ kex.start_kex()
+ self.assertEqual(
+ (paramiko.kex_group1._MSG_KEXDH_INIT,), transport._expect
+ )
+
+ msg = Message()
+ msg.add_mpint(69)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_group1._MSG_KEXDH_INIT, msg)
+ K = 933242830095376162107925500057692534838883186615567574891154103836907630698358649443101764908667358576734565553213003142941996368306996312915844839972197961603283544950658467545799914435739152351344917376359963584614213874232577733869049670230112638724993540996854599166318001059065780674008011575015459772051180901213815080343343801745386220342919837913506966863570473712948197760657442974564354432738520446202131551650771882909329069340612274196233658123593466135642819578182367229641847749149740891990379052266213711500434128970973602206842980669193719602075489724202241641553472106310932258574377789863734311328542715212248147206865762697424822447603031087553480483833829498375309975229907460562402877655519980113688369262871485777790149373908739910846630414678346163764464587129010141922982925829457954376352735653834300282864445132624993186496129911208133529828461690634463092007726349795944930302881758403402084584307180896465875803621285362317770276493727205689466142632599776710824902573926951951209239626732358074877997756011804454926541386215567756538832824717436605031489511654178384081883801272314328403020205577714999460724519735573055540814037716770051316113795603990199374791348798218428912977728347485489266146775472 # noqa
+ H = b"F97BB05A572A663688CA7EA1AA812D3C82EE6C8FA9D4B1D69435783D931157F199909EA38B003E4E4385C8861183CBFF0CF0EF1433A8B3C69AB4DD9420FCC85F" # noqa
+ x = b"1F0000000866616B652D6B65790000020100859FF55A23E0F66463561DD8BFC4764C69C05F85665B06EC9E29EF5003A53A8FA890B6A6EB624DEB55A4FB279DE7010A53580A126817E3D235B05A1081662B1500961D0625F0AAD287F1B597CBA9DB9550D9CC26355C4C59F92E613B5C21AC191F152C09A5DB46DCBA5EA58E3CA6A8B0EB7183E27FAC10106022E8521FA91240FB389060F1E1E4A355049D29DCC82921CE6588791743E4B1DEEE0166F7CC5180C3C75F3773342DF95C8C10AAA5D12975257027936B99B3DED6E6E98CF27EADEAEAE04E7F0A28071F578646B985FCE28A59CEB36287CB65759BE0544D4C4018CDF03C9078FE9CA79ECA611CB6966899E6FD29BE0781491C659FE2380E0D99D50D9CFAAB94E61BE311779719C4C43C6D223AD3799C3915A9E55076A21152DBBF911D6594296D6ECDC1B6FA71997CD29DF987B80FCA7F36BB7F19863C72BBBF839746AFBF9A5B407D468C976AA3E36FA118D3EAAD2E08BF6AE219F81F2CE2BE946337F06CC09BBFABE938A4087E413921CBEC1965ED905999B83396ECA226110CDF6EFB80F815F6489AF87561DA3857F13A7705921306D94176231FBB336B17C3724BC17A28BECB910093AB040873D5D760E8C182B88ECCE3E38DDA68CE35BD152DF7550BD908791FCCEDD1FFDF5ED2A57FFAE79599E487A7726D8A3D950B1729A08FBB60EE462A6BBE8BF0F5F0E1358129A37840FE5B3EEB8BF26E99FA222EAE830000000866616B652D736967" # noqa
+ self.assertEqual(K, transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual(x, hexlify(transport._message.asbytes()).upper())
+ self.assertTrue(transport._activated)
+
+ @pytest.mark.skipif("not KexCurve25519.is_available()")
+ def test_kex_c25519_client(self):
+ K = 71294722834835117201316639182051104803802881348227506835068888449366462300724 # noqa
+ transport = FakeTransport()
+ transport.server_mode = False
+ kex = KexCurve25519(transport)
+ kex.start_kex()
+ self.assertEqual(
+ (paramiko.kex_curve25519._MSG_KEXECDH_REPLY,), transport._expect
+ )
+
+ # fake reply
+ msg = Message()
+ msg.add_string("fake-host-key")
+ Q_S = unhexlify(
+ "8d13a119452382a1ada8eea4c979f3e63ad3f0c7366786d6c5b54b87219bae49"
+ )
+ msg.add_string(Q_S)
+ msg.add_string("fake-sig")
+ msg.rewind()
+ kex.parse_next(paramiko.kex_curve25519._MSG_KEXECDH_REPLY, msg)
+ H = b"05B6F6437C0CF38D1A6C5A6F6E2558DEB54E7FC62447EBFB1E5D7407326A5475"
+ self.assertEqual(K, kex.transport._K)
+ self.assertEqual(H, hexlify(transport._H).upper())
+ self.assertEqual((b"fake-host-key", b"fake-sig"), transport._verify)
+ self.assertTrue(transport._activated)
+
+ @pytest.mark.skipif("not KexCurve25519.is_available()")
+ def test_kex_c25519_server(self):
+ K = 71294722834835117201316639182051104803802881348227506835068888449366462300724 # noqa
+ transport = FakeTransport()
+ transport.server_mode = True
+ kex = KexCurve25519(transport)
+ kex.start_kex()
+ self.assertEqual(
+ (paramiko.kex_curve25519._MSG_KEXECDH_INIT,), transport._expect
+ )
+
+ # fake init
+ msg = Message()
+ Q_C = unhexlify(
+ "8d13a119452382a1ada8eea4c979f3e63ad3f0c7366786d6c5b54b87219bae49"
+ )
+ H = b"DF08FCFCF31560FEE639D9B6D56D760BC3455B5ADA148E4514181023E7A9B042"
+ msg.add_string(Q_C)
+ msg.rewind()
+ kex.parse_next(paramiko.kex_curve25519._MSG_KEXECDH_INIT, msg)
+ self.assertEqual(K, transport._K)
+ self.assertTrue(transport._activated)
+ self.assertEqual(H, hexlify(transport._H).upper())
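
For context, the curve25519 cases above boil down to an X25519 agreement whose shared secret is read as a big-endian integer K. A minimal sketch using the `cryptography` primitives paramiko builds on (the private key here is freshly generated rather than the fixed value the FakeTransport fixture pins, so the resulting K will differ from the constant asserted above):

    from binascii import unhexlify
    from cryptography.hazmat.primitives.asymmetric.x25519 import (
        X25519PrivateKey,
        X25519PublicKey,
    )

    private = X25519PrivateKey.generate()  # the tests fix this key via their fixtures
    peer = X25519PublicKey.from_public_bytes(
        unhexlify(
            "8d13a119452382a1ada8eea4c979f3e63ad3f0c7366786d6c5b54b87219bae49"
        )
    )
    secret = private.exchange(peer)    # 32-byte shared secret
    K = int.from_bytes(secret, "big")  # the integer the tests compare to transport._K
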
diff --git a/tests/test_kex_gss.py b/tests/test_kex_gss.py
new file mode 100644
index 0000000..a81b195
--- /dev/null
+++ b/tests/test_kex_gss.py
@@ -0,0 +1,154 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+# Copyright (C) 2013-2014 science + computing ag
+# Author: Sebastian Deiss <sebastian.deiss@t-online.de>
+#
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Unit Tests for the GSS-API / SSPI SSHv2 Diffie-Hellman Key Exchange and user
+authentication
+"""
+
+
+import socket
+import threading
+import unittest
+
+import paramiko
+
+from ._util import needs_gssapi, KerberosTestCase, update_env, _support
+
+
+class NullServer(paramiko.ServerInterface):
+ def get_allowed_auths(self, username):
+ return "gssapi-keyex"
+
+ def check_auth_gssapi_keyex(
+ self, username, gss_authenticated=paramiko.AUTH_FAILED, cc_file=None
+ ):
+ if gss_authenticated == paramiko.AUTH_SUCCESSFUL:
+ return paramiko.AUTH_SUCCESSFUL
+ return paramiko.AUTH_FAILED
+
+ def enable_auth_gssapi(self):
+ UseGSSAPI = True
+ return UseGSSAPI
+
+ def check_channel_request(self, kind, chanid):
+ return paramiko.OPEN_SUCCEEDED
+
+ def check_channel_exec_request(self, channel, command):
+ if command != b"yes":
+ return False
+ return True
+
+
+@needs_gssapi
+class GSSKexTest(KerberosTestCase):
+ def setUp(self):
+ self.username = self.realm.user_princ
+ self.hostname = socket.getfqdn(self.realm.hostname)
+ self.sockl = socket.socket()
+ self.sockl.bind((self.realm.hostname, 0))
+ self.sockl.listen(1)
+ self.addr, self.port = self.sockl.getsockname()
+ self.event = threading.Event()
+ update_env(self, self.realm.env)
+ thread = threading.Thread(target=self._run)
+ thread.start()
+
+ def tearDown(self):
+ for attr in "tc ts socks sockl".split():
+ if hasattr(self, attr):
+ getattr(self, attr).close()
+
+ def _run(self):
+ self.socks, addr = self.sockl.accept()
+ self.ts = paramiko.Transport(self.socks, gss_kex=True)
+ host_key = paramiko.RSAKey.from_private_key_file(_support("rsa.key"))
+ self.ts.add_server_key(host_key)
+ self.ts.set_gss_host(self.realm.hostname)
+ try:
+ self.ts.load_server_moduli()
+ except:
+ print("(Failed to load moduli -- gex will be unsupported.)")
+ server = NullServer()
+ self.ts.start_server(self.event, server)
+
+ def _test_gsskex_and_auth(self, gss_host, rekey=False):
+ """
+ Verify that Paramiko can handle SSHv2 GSS-API / SSPI authenticated
+ Diffie-Hellman Key Exchange and user authentication with the GSS-API
+ context created during key exchange.
+ """
+ host_key = paramiko.RSAKey.from_private_key_file(_support("rsa.key"))
+ public_host_key = paramiko.RSAKey(data=host_key.asbytes())
+
+ self.tc = paramiko.SSHClient()
+ self.tc.get_host_keys().add(
+ f"[{self.hostname}]:{self.port}", "ssh-rsa", public_host_key
+ )
+ self.tc.connect(
+ self.hostname,
+ self.port,
+ username=self.username,
+ gss_auth=True,
+ gss_kex=True,
+ gss_host=gss_host,
+ )
+
+ self.event.wait(1.0)
+        self.assertTrue(self.event.is_set())
+        self.assertTrue(self.ts.is_active())
+        self.assertEqual(self.username, self.ts.get_username())
+        self.assertEqual(True, self.ts.is_authenticated())
+        self.assertEqual(True, self.tc.get_transport().gss_kex_used)
+
+ stdin, stdout, stderr = self.tc.exec_command("yes")
+ schan = self.ts.accept(1.0)
+ if rekey:
+ self.tc.get_transport().renegotiate_keys()
+
+ schan.send("Hello there.\n")
+ schan.send_stderr("This is on stderr.\n")
+ schan.close()
+
+        self.assertEqual("Hello there.\n", stdout.readline())
+        self.assertEqual("", stdout.readline())
+        self.assertEqual("This is on stderr.\n", stderr.readline())
+        self.assertEqual("", stderr.readline())
+
+ stdin.close()
+ stdout.close()
+ stderr.close()
+
+ def test_gsskex_and_auth(self):
+ """
+ Verify that Paramiko can handle SSHv2 GSS-API / SSPI authenticated
+ Diffie-Hellman Key Exchange and user authentication with the GSS-API
+ context created during key exchange.
+ """
+ self._test_gsskex_and_auth(gss_host=None)
+
+ # To be investigated, see https://github.com/paramiko/paramiko/issues/1312
+ @unittest.expectedFailure
+ def test_gsskex_and_auth_rekey(self):
+ """
+ Verify that Paramiko can rekey.
+ """
+ self._test_gsskex_and_auth(gss_host=None, rekey=True)
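
Outside the Kerberos test harness, the client-side pattern these tests exercise looks roughly like the sketch below; the host and principal are placeholders, host-key setup is elided, and a working GSS-API/Kerberos environment is assumed:

    import paramiko

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    # gss_kex negotiates the GSS-API key exchange; gss_auth then reuses the
    # security context created during kex for "gssapi-keyex" authentication.
    client.connect(
        "server.example.org",         # placeholder host
        username="user@EXAMPLE.ORG",  # placeholder Kerberos principal
        gss_auth=True,
        gss_kex=True,
    )
    client.close()
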
diff --git a/tests/test_message.py b/tests/test_message.py
new file mode 100644
index 0000000..3c5f961
--- /dev/null
+++ b/tests/test_message.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Some unit tests for ssh protocol message blocks.
+"""
+
+import unittest
+
+from paramiko.message import Message
+from paramiko.common import byte_chr, zero_byte
+
+
+class MessageTest(unittest.TestCase):
+
+ __a = (
+ b"\x00\x00\x00\x17\x07\x60\xe0\x90\x00\x00\x00\x01\x71\x00\x00\x00\x05\x68\x65\x6c\x6c\x6f\x00\x00\x03\xe8" # noqa
+ + b"x" * 1000
+ )
+ __b = b"\x01\x00\xf3\x00\x3f\x00\x00\x00\x10\x68\x75\x65\x79\x2c\x64\x65\x77\x65\x79\x2c\x6c\x6f\x75\x69\x65" # noqa
+ __c = b"\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\xf5\xe4\xd3\xc2\xb1\x09\x00\x00\x00\x01\x11\x00\x00\x00\x07\x00\xf5\xe4\xd3\xc2\xb1\x09\x00\x00\x00\x06\x9a\x1b\x2c\x3d\x4e\xf7" # noqa
+ __d = b"\x00\x00\x00\x05\xff\x00\x00\x00\x05\x11\x22\x33\x44\x55\xff\x00\x00\x00\x0a\x00\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x63\x61\x74\x00\x00\x00\x03\x61\x2c\x62" # noqa
+
+ def test_encode(self):
+ msg = Message()
+ msg.add_int(23)
+ msg.add_int(123789456)
+ msg.add_string("q")
+ msg.add_string("hello")
+ msg.add_string("x" * 1000)
+ self.assertEqual(msg.asbytes(), self.__a)
+
+ msg = Message()
+ msg.add_boolean(True)
+ msg.add_boolean(False)
+ msg.add_byte(byte_chr(0xF3))
+
+ msg.add_bytes(zero_byte + byte_chr(0x3F))
+ msg.add_list(["huey", "dewey", "louie"])
+ self.assertEqual(msg.asbytes(), self.__b)
+
+ msg = Message()
+ msg.add_int64(5)
+ msg.add_int64(0xF5E4D3C2B109)
+ msg.add_mpint(17)
+ msg.add_mpint(0xF5E4D3C2B109)
+ msg.add_mpint(-0x65E4D3C2B109)
+ self.assertEqual(msg.asbytes(), self.__c)
+
+ def test_decode(self):
+ msg = Message(self.__a)
+ self.assertEqual(msg.get_int(), 23)
+ self.assertEqual(msg.get_int(), 123789456)
+ self.assertEqual(msg.get_text(), "q")
+ self.assertEqual(msg.get_text(), "hello")
+ self.assertEqual(msg.get_text(), "x" * 1000)
+
+ msg = Message(self.__b)
+ self.assertEqual(msg.get_boolean(), True)
+ self.assertEqual(msg.get_boolean(), False)
+ self.assertEqual(msg.get_byte(), byte_chr(0xF3))
+ self.assertEqual(msg.get_bytes(2), zero_byte + byte_chr(0x3F))
+ self.assertEqual(msg.get_list(), ["huey", "dewey", "louie"])
+
+ msg = Message(self.__c)
+ self.assertEqual(msg.get_int64(), 5)
+ self.assertEqual(msg.get_int64(), 0xF5E4D3C2B109)
+ self.assertEqual(msg.get_mpint(), 17)
+ self.assertEqual(msg.get_mpint(), 0xF5E4D3C2B109)
+ self.assertEqual(msg.get_mpint(), -0x65E4D3C2B109)
+
+ def test_add(self):
+ msg = Message()
+ msg.add(5)
+ msg.add(0x1122334455)
+ msg.add(0xF00000000000000000)
+ msg.add(True)
+ msg.add("cat")
+ msg.add(["a", "b"])
+ self.assertEqual(msg.asbytes(), self.__d)
+
+ def test_misc(self):
+ msg = Message(self.__d)
+ self.assertEqual(msg.get_adaptive_int(), 5)
+ self.assertEqual(msg.get_adaptive_int(), 0x1122334455)
+ self.assertEqual(msg.get_adaptive_int(), 0xF00000000000000000)
+ self.assertEqual(msg.get_so_far(), self.__d[:29])
+ self.assertEqual(msg.get_remainder(), self.__d[29:])
+ msg.rewind()
+ self.assertEqual(msg.get_adaptive_int(), 5)
+ self.assertEqual(msg.get_so_far(), self.__d[:4])
+ self.assertEqual(msg.get_remainder(), self.__d[4:])
+
+ def test_bytes_str_and_repr(self):
+ msg = Message(self.__d)
+ assert str(msg) == f"paramiko.Message({self.__d!r})"
+ assert repr(msg) == str(msg)
+ assert bytes(msg) == msg.asbytes() == self.__d
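
As a quick usage sketch of the Message API exercised above, packing a few values and reading them back from the wire bytes:

    from paramiko.message import Message

    msg = Message()
    msg.add_int(23)                 # uint32
    msg.add_string("hello")         # length-prefixed string
    msg.add_mpint(-0x65E4D3C2B109)  # arbitrary-precision integer

    wire = msg.asbytes()            # SSH wire encoding

    parsed = Message(wire)
    assert parsed.get_int() == 23
    assert parsed.get_text() == "hello"
    assert parsed.get_mpint() == -0x65E4D3C2B109
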
diff --git a/tests/test_packetizer.py b/tests/test_packetizer.py
new file mode 100644
index 0000000..aee21c2
--- /dev/null
+++ b/tests/test_packetizer.py
@@ -0,0 +1,148 @@
+# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Some unit tests for the ssh2 protocol in Transport.
+"""
+
+import sys
+import unittest
+from hashlib import sha1
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.ciphers import algorithms, Cipher, modes
+
+from paramiko import Message, Packetizer, util
+from paramiko.common import byte_chr, zero_byte
+
+from ._loop import LoopSocket
+
+
+x55 = byte_chr(0x55)
+x1f = byte_chr(0x1F)
+
+
+class PacketizerTest(unittest.TestCase):
+ def test_write(self):
+ rsock = LoopSocket()
+ wsock = LoopSocket()
+ rsock.link(wsock)
+ p = Packetizer(wsock)
+ p.set_log(util.get_logger("paramiko.transport"))
+ p.set_hexdump(True)
+ encryptor = Cipher(
+ algorithms.AES(zero_byte * 16),
+ modes.CBC(x55 * 16),
+ backend=default_backend(),
+ ).encryptor()
+ p.set_outbound_cipher(encryptor, 16, sha1, 12, x1f * 20)
+
+ # message has to be at least 16 bytes long, so we'll have at least one
+ # block of data encrypted that contains zero random padding bytes
+ m = Message()
+ m.add_byte(byte_chr(100))
+ m.add_int(100)
+ m.add_int(1)
+ m.add_int(900)
+ p.send_message(m)
+ data = rsock.recv(100)
+ # 32 + 12 bytes of MAC = 44
+ self.assertEqual(44, len(data))
+ self.assertEqual(
+ b"\x43\x91\x97\xbd\x5b\x50\xac\x25\x87\xc2\xc4\x6b\xc7\xe9\x38\xc0", # noqa
+ data[:16],
+ )
+
+ def test_read(self):
+ rsock = LoopSocket()
+ wsock = LoopSocket()
+ rsock.link(wsock)
+ p = Packetizer(rsock)
+ p.set_log(util.get_logger("paramiko.transport"))
+ p.set_hexdump(True)
+ decryptor = Cipher(
+ algorithms.AES(zero_byte * 16),
+ modes.CBC(x55 * 16),
+ backend=default_backend(),
+ ).decryptor()
+ p.set_inbound_cipher(decryptor, 16, sha1, 12, x1f * 20)
+ wsock.send(
+ b"\x43\x91\x97\xbd\x5b\x50\xac\x25\x87\xc2\xc4\x6b\xc7\xe9\x38\xc0\x90\xd2\x16\x56\x0d\x71\x73\x61\x38\x7c\x4c\x3d\xfb\x97\x7d\xe2\x6e\x03\xb1\xa0\xc2\x1c\xd6\x41\x41\x4c\xb4\x59" # noqa
+ )
+ cmd, m = p.read_message()
+ self.assertEqual(100, cmd)
+ self.assertEqual(100, m.get_int())
+ self.assertEqual(1, m.get_int())
+ self.assertEqual(900, m.get_int())
+
+ def test_closed(self):
+ if sys.platform.startswith("win"): # no SIGALRM on windows
+ return
+ rsock = LoopSocket()
+ wsock = LoopSocket()
+ rsock.link(wsock)
+ p = Packetizer(wsock)
+ p.set_log(util.get_logger("paramiko.transport"))
+ p.set_hexdump(True)
+ encryptor = Cipher(
+ algorithms.AES(zero_byte * 16),
+ modes.CBC(x55 * 16),
+ backend=default_backend(),
+ ).encryptor()
+ p.set_outbound_cipher(encryptor, 16, sha1, 12, x1f * 20)
+
+ # message has to be at least 16 bytes long, so we'll have at least one
+ # block of data encrypted that contains zero random padding bytes
+ m = Message()
+ m.add_byte(byte_chr(100))
+ m.add_int(100)
+ m.add_int(1)
+ m.add_int(900)
+ wsock.send = lambda x: 0
+ from functools import wraps
+ import errno
+ import os
+ import signal
+
+        class TimeoutError(Exception):
+            def __init__(self, error_message):
+                if hasattr(errno, "ETIME"):
+                    self.message = os.strerror(errno.ETIME)
+                else:
+                    self.message = error_message
+
+ def timeout(seconds=1, error_message="Timer expired"):
+ def decorator(func):
+ def _handle_timeout(signum, frame):
+ raise TimeoutError(error_message)
+
+ def wrapper(*args, **kwargs):
+ signal.signal(signal.SIGALRM, _handle_timeout)
+ signal.alarm(seconds)
+ try:
+ result = func(*args, **kwargs)
+ finally:
+ signal.alarm(0)
+ return result
+
+ return wraps(func)(wrapper)
+
+ return decorator
+
+ send = timeout()(p.send_message)
+ self.assertRaises(EOFError, send, m)
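
The hard-coded 44 in test_write above falls directly out of the SSH binary packet format: the 13-byte payload plus its 5-byte header is padded up to a whole number of 16-byte AES blocks (with at least 4 padding bytes), then the 12-byte truncated MAC is appended. A back-of-the-envelope check:

    block = 16               # AES-CBC block size used by the test
    mac_len = 12             # truncated HMAC length passed to set_outbound_cipher
    payload = 1 + 4 + 4 + 4  # one message-type byte plus three uint32s

    body = 4 + 1 + payload   # packet-length field + padding-length field + payload
    padding = block - (body % block)
    if padding < 4:          # SSH requires at least 4 bytes of random padding
        padding += block
    assert body + padding == 32
    assert body + padding + mac_len == 44
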
diff --git a/tests/test_pkey.py b/tests/test_pkey.py
new file mode 100644
index 0000000..d4d193b
--- /dev/null
+++ b/tests/test_pkey.py
@@ -0,0 +1,696 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Some unit tests for public/private key objects.
+"""
+
+import unittest
+import os
+import stat
+from binascii import hexlify
+from hashlib import md5
+from io import StringIO
+
+from paramiko import (
+ RSAKey,
+ DSSKey,
+ ECDSAKey,
+ Ed25519Key,
+ Message,
+ util,
+ SSHException,
+)
+from paramiko.util import b
+from paramiko.common import o600, byte_chr
+
+from cryptography.exceptions import UnsupportedAlgorithm
+from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateNumbers
+from unittest.mock import patch, Mock
+import pytest
+
+from ._util import _support, is_low_entropy, requires_sha1_signing
+
+
+# from openssh's ssh-keygen
+PUB_RSA = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA049W6geFpmsljTwfvI1UmKWWJPNFI74+vNKTk4dmzkQY2yAMs6FhlvhlI8ysU4oj71ZsRYMecHbBbxdN79+JRFVYTKaLqjwGENeTd+yv4q+V2PvZv3fLnzApI3l7EJCqhWwJUHJ1jAkZzqDx0tyOL4uoZpww3nmE0kb3y21tH4c=" # noqa
+PUB_DSS = "ssh-dss AAAAB3NzaC1kc3MAAACBAOeBpgNnfRzr/twmAQRu2XwWAp3CFtrVnug6s6fgwj/oLjYbVtjAy6pl/h0EKCWx2rf1IetyNsTxWrniA9I6HeDj65X1FyDkg6g8tvCnaNB8Xp/UUhuzHuGsMIipRxBxw9LF608EqZcj1E3ytktoW5B5OcjrkEoz3xG7C+rpIjYvAAAAFQDwz4UnmsGiSNu5iqjn3uTzwUpshwAAAIEAkxfFeY8P2wZpDjX0MimZl5wkoFQDL25cPzGBuB4OnB8NoUk/yjAHIIpEShw8V+LzouMK5CTJQo5+Ngw3qIch/WgRmMHy4kBq1SsXMjQCte1So6HBMvBPIW5SiMTmjCfZZiw4AYHK+B/JaOwaG9yRg2Ejg4Ok10+XFDxlqZo8Y+wAAACARmR7CCPjodxASvRbIyzaVpZoJ/Z6x7dAumV+ysrV1BVYd0lYukmnjO1kKBWApqpH1ve9XDQYN8zgxM4b16L21kpoWQnZtXrY3GZ4/it9kUgyB7+NwacIBlXa8cMDL7Q/69o0d54U0X/NeX5QxuYR6OMJlrkQB7oiW/P/1mwjQgE=" # noqa
+PUB_ECDSA_256 = "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJSPZm3ZWkvk/Zx8WP+fZRZ5/NBBHnGQwR6uIC6XHGPDIHuWUzIjAwA0bzqkOUffEsbLe+uQgKl5kbc/L8KA/eo=" # noqa
+PUB_ECDSA_384 = "ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBBbGibQLW9AAZiGN2hEQxWYYoFaWKwN3PKSaDJSMqmIn1Z9sgRUuw8Y/w502OGvXL/wFk0i2z50l3pWZjD7gfMH7gX5TUiCzwrQkS+Hn1U2S9aF5WJp0NcIzYxXw2r4M2A==" # noqa
+PUB_ECDSA_521 = "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBACaOaFLZGuxa5AW16qj6VLypFbLrEWrt9AZUloCMefxO8bNLjK/O5g0rAVasar1TnyHE9qj4NwzANZASWjQNbc4MAG8vzqezFwLIn/kNyNTsXNfqEko9OgHZknlj2Z79dwTJcRAL4QLcT5aND0EHZLB2fAUDXiWIb2j4rg1mwPlBMiBXA==" # noqa
+PUB_RSA_2K_OPENSSH = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDF+Dpr54DX0WdeTDpNAMdkCWEkl3OXtNgf58qlN1gX572OLBqLf0zT4bHstUEpU3piazph/rSWcUMuBoD46tZ6jiH7H9b9Pem2eYQWaELDDkM+v9BMbEy5rMbFRLol5OtEvPFqneyEAanPOgvd8t3yyhSev9QVusakzJ8j8LGgrA8huYZ+Srnw0shEWLG70KUKCh3rG0QIvA8nfhtUOisr2Gp+F0YxMGb5gwBlQYAYE5l6u1SjZ7hNjyNosjK+wRBFgFFBYVpkZKJgWoK9w4ijFyzMZTucnZMqKOKAjIJvHfKBf2/cEfYxSq1EndqTqjYsd9T7/s2vcn1OH5a0wkER" # noqa
+RSA_2K_OPENSSH_P = 161773687847617758886803946572654778625119997081005961935077336594287351354258259920334554906235187683459069634729972458348855793639393524799865799559575414247668746919721196359908321800753913350455861871582087986355637886875933045224711827701526739934602161222599672381604211130651397331775901258858869418853 # noqa
+RSA_2K_OPENSSH_Q = 154483416325630619558401349033571772244816915504195060221073502923720741119664820208064202825686848103224453777955988437823797692957091438442833606009978046057345917301441832647551208158342812551003395417862260727795454409459089912659057393394458150862012620127030757893820711157099494238156383382454310199869 # noqa
+PUB_DSS_1K_OPENSSH = "ssh-dss AAAAB3NzaC1kc3MAAACBAL8XEx7F9xuwBNles+vWpNF+YcofrBhjX1r5QhpBe0eoYWLHRcroN6lxwCdGYRfgOoRjTncBiixQX/uUxAY96zDh3ir492s2BcJt4ihvNn/AY0I0OTuX/2IwGk9CGzafjaeZNVYxMa8lcVt0hSOTjkPQ7gVuk6bJzMInvie+VWKLAAAAFQDUgYdY+rhR0SkKbC09BS/SIHcB+wAAAIB44+4zpCNcd0CGvZlowH99zyPX8uxQtmTLQFuR2O8O0FgVVuCdDgD0D9W8CLOp32oatpM0jyyN89EdvSWzjHzZJ+L6H1FtZps7uhpDFWHdva1R25vyGecLMUuXjo5t/D7oCDih+HwHoSAxoi0QvsPd8/qqHQVznNJKtR6thUpXEwAAAIAG4DCBjbgTTgpBw0egRkJwBSz0oTt+1IcapNU2jA6N8urMSk9YXHEQHKN68BAF3YJ59q2Ujv3LOXmBqGd1T+kzwUszfMlgzq8MMu19Yfzse6AIK1Agn1Vj6F7YXLsXDN+T4KszX5+FJa7t/Zsp3nALWy6l0f4WKivEF5Y2QpEFcQ==" # noqa
+PUB_EC_384_OPENSSH = "ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBIch5LXTq/L/TWsTGG6dIktxD8DIMh7EfvoRmWsks6CuNDTvFvbQNtY4QO1mn5OXegHbS0M5DPIS++wpKGFP3suDEH08O35vZQasLNrL0tO2jyyEnzB2ZEx3PPYci811yg==" # noqa
+
+FINGER_RSA = "1024 60:73:38:44:cb:51:86:65:7f:de:da:a2:2b:5a:57:d5"
+FINGER_DSS = "1024 44:78:f0:b9:a2:3c:c5:18:20:09:ff:75:5b:c1:d2:6c"
+FINGER_ECDSA_256 = "256 25:19:eb:55:e6:a1:47:ff:4f:38:d2:75:6f:a5:d5:60"
+FINGER_ECDSA_384 = "384 c1:8d:a0:59:09:47:41:8e:a8:a6:07:01:29:23:b4:65"
+FINGER_ECDSA_521 = "521 44:58:22:52:12:33:16:0e:ce:0e:be:2c:7c:7e:cc:1e"
+SIGNED_RSA = "20:d7:8a:31:21:cb:f7:92:12:f2:a4:89:37:f5:78:af:e6:16:b6:25:b9:97:3d:a2:cd:5f:ca:20:21:73:4c:ad:34:73:8f:20:77:28:e2:94:15:08:d8:91:40:7a:85:83:bf:18:37:95:dc:54:1a:9b:88:29:6c:73:ca:38:b4:04:f1:56:b9:f2:42:9d:52:1b:29:29:b4:4f:fd:c9:2d:af:47:d2:40:76:30:f3:63:45:0c:d9:1d:43:86:0f:1c:70:e2:93:12:34:f3:ac:c5:0a:2f:14:50:66:59:f1:88:ee:c1:4a:e9:d1:9c:4e:46:f0:0e:47:6f:38:74:f1:44:a8" # noqa
+SIGNED_RSA_256 = "cc:6:60:e0:0:2c:ac:9e:26:bc:d5:68:64:3f:9f:a7:e5:aa:41:eb:88:4a:25:5:9c:93:84:66:ef:ef:60:f4:34:fb:f4:c8:3d:55:33:6a:77:bd:b2:ee:83:f:71:27:41:7e:f5:7:5:0:a9:4c:7:80:6f:be:76:67:cb:58:35:b9:2b:f3:c2:d3:3c:ee:e1:3f:59:e0:fa:e4:5c:92:ed:ae:74:de:d:d6:27:16:8f:84:a3:86:68:c:94:90:7d:6e:cc:81:12:d8:b6:ad:aa:31:a8:13:3d:63:81:3e:bb:5:b6:38:4d:2:d:1b:5b:70:de:83:cc:3a:cb:31" # noqa
+SIGNED_RSA_512 = "87:46:8b:75:92:33:78:a0:22:35:32:39:23:c6:ab:e1:6:92:ad:bc:7f:6e:ab:19:32:e4:78:b2:2c:8f:1d:c:65:da:fc:a5:7:ca:b6:55:55:31:83:b1:a0:af:d1:95:c5:2e:af:56:ba:f5:41:64:f:39:9d:af:82:43:22:8f:90:52:9d:89:e7:45:97:df:f3:f2:bc:7b:3a:db:89:e:34:fd:18:62:25:1b:ef:77:aa:c6:6c:99:36:3a:84:d6:9c:2a:34:8c:7f:f4:bb:c9:a5:9a:6c:11:f2:cf:da:51:5e:1e:7f:90:27:34:de:b2:f3:15:4f:db:47:32:6b:a7" # noqa
+FINGER_RSA_2K_OPENSSH = "2048 68:d1:72:01:bf:c0:0c:66:97:78:df:ce:75:74:46:d6"
+FINGER_DSS_1K_OPENSSH = "1024 cf:1d:eb:d7:61:d3:12:94:c6:c0:c6:54:35:35:b0:82"
+FINGER_EC_384_OPENSSH = "384 72:14:df:c1:9a:c3:e6:0e:11:29:d6:32:18:7b:ea:9b"
+
+RSA_PRIVATE_OUT = """\
+-----BEGIN RSA PRIVATE KEY-----
+MIICWgIBAAKBgQDTj1bqB4WmayWNPB+8jVSYpZYk80Ujvj680pOTh2bORBjbIAyz
+oWGW+GUjzKxTiiPvVmxFgx5wdsFvF03v34lEVVhMpouqPAYQ15N37K/ir5XY+9m/
+d8ufMCkjeXsQkKqFbAlQcnWMCRnOoPHS3I4vi6hmnDDeeYTSRvfLbW0fhwIBIwKB
+gBIiOqZYaoqbeD9OS9z2K9KR2atlTxGxOJPXiP4ESqP3NVScWNwyZ3NXHpyrJLa0
+EbVtzsQhLn6rF+TzXnOlcipFvjsem3iYzCpuChfGQ6SovTcOjHV9z+hnpXvQ/fon
+soVRZY65wKnF7IAoUwTmJS9opqgrN6kRgCd3DASAMd1bAkEA96SBVWFt/fJBNJ9H
+tYnBKZGw0VeHOYmVYbvMSstssn8un+pQpUm9vlG/bp7Oxd/m+b9KWEh2xPfv6zqU
+avNwHwJBANqzGZa/EpzF4J8pGti7oIAPUIDGMtfIcmqNXVMckrmzQ2vTfqtkEZsA
+4rE1IERRyiJQx6EJsz21wJmGV9WJQ5kCQQDwkS0uXqVdFzgHO6S++tjmjYcxwr3g
+H0CoFYSgbddOT6miqRskOQF3DZVkJT3kyuBgU2zKygz52ukQZMqxCb1fAkASvuTv
+qfpH87Qq5kQhNKdbbwbmd2NxlNabazPijWuphGTdW0VfJdWfklyS2Kr+iqrs/5wV
+HhathJt636Eg7oIjAkA8ht3MQ+XSl9yIJIS8gVpbPxSw5OMfw0PjVE7tBdQruiSc
+nvuQES5C9BMHjF39LZiGH1iLQy7FgdHyoP+eodI7
+-----END RSA PRIVATE KEY-----
+"""
+
+DSS_PRIVATE_OUT = """\
+-----BEGIN DSA PRIVATE KEY-----
+MIIBuwIBAAKBgQDngaYDZ30c6/7cJgEEbtl8FgKdwhba1Z7oOrOn4MI/6C42G1bY
+wMuqZf4dBCglsdq39SHrcjbE8Vq54gPSOh3g4+uV9Rcg5IOoPLbwp2jQfF6f1FIb
+sx7hrDCIqUcQccPSxetPBKmXI9RN8rZLaFuQeTnI65BKM98Ruwvq6SI2LwIVAPDP
+hSeawaJI27mKqOfe5PPBSmyHAoGBAJMXxXmPD9sGaQ419DIpmZecJKBUAy9uXD8x
+gbgeDpwfDaFJP8owByCKREocPFfi86LjCuQkyUKOfjYMN6iHIf1oEZjB8uJAatUr
+FzI0ArXtUqOhwTLwTyFuUojE5own2WYsOAGByvgfyWjsGhvckYNhI4ODpNdPlxQ8
+ZamaPGPsAoGARmR7CCPjodxASvRbIyzaVpZoJ/Z6x7dAumV+ysrV1BVYd0lYukmn
+jO1kKBWApqpH1ve9XDQYN8zgxM4b16L21kpoWQnZtXrY3GZ4/it9kUgyB7+NwacI
+BlXa8cMDL7Q/69o0d54U0X/NeX5QxuYR6OMJlrkQB7oiW/P/1mwjQgECFGI9QPSc
+h9pT9XHqn+1rZ4bK+QGA
+-----END DSA PRIVATE KEY-----
+"""
+
+ECDSA_PRIVATE_OUT_256 = """\
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIKB6ty3yVyKEnfF/zprx0qwC76MsMlHY4HXCnqho2eKioAoGCCqGSM49
+AwEHoUQDQgAElI9mbdlaS+T9nHxY/59lFnn80EEecZDBHq4gLpccY8Mge5ZTMiMD
+ADRvOqQ5R98Sxst765CAqXmRtz8vwoD96g==
+-----END EC PRIVATE KEY-----
+"""
+
+ECDSA_PRIVATE_OUT_384 = """\
+-----BEGIN EC PRIVATE KEY-----
+MIGkAgEBBDBDdO8IXvlLJgM7+sNtPl7tI7FM5kzuEUEEPRjXIPQM7mISciwJPBt+
+y43EuG8nL4mgBwYFK4EEACKhZANiAAQWxom0C1vQAGYhjdoREMVmGKBWlisDdzyk
+mgyUjKpiJ9WfbIEVLsPGP8OdNjhr1y/8BZNIts+dJd6VmYw+4HzB+4F+U1Igs8K0
+JEvh59VNkvWheViadDXCM2MV8Nq+DNg=
+-----END EC PRIVATE KEY-----
+"""
+
+ECDSA_PRIVATE_OUT_521 = """\
+-----BEGIN EC PRIVATE KEY-----
+MIHcAgEBBEIAprQtAS3OF6iVUkT8IowTHWicHzShGgk86EtuEXvfQnhZFKsWm6Jo
+iqAr1yEaiuI9LfB3Xs8cjuhgEEfbduYr/f6gBwYFK4EEACOhgYkDgYYABACaOaFL
+ZGuxa5AW16qj6VLypFbLrEWrt9AZUloCMefxO8bNLjK/O5g0rAVasar1TnyHE9qj
+4NwzANZASWjQNbc4MAG8vzqezFwLIn/kNyNTsXNfqEko9OgHZknlj2Z79dwTJcRA
+L4QLcT5aND0EHZLB2fAUDXiWIb2j4rg1mwPlBMiBXA==
+-----END EC PRIVATE KEY-----
+"""
+
+x1234 = b"\x01\x02\x03\x04"
+
+TEST_KEY_BYTESTR = "\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01#\x00\x00\x00\x00ӏV\x07k%<\x1fT$E#>ғfD\x18 \x0cae#̬S#VlE\x1epvo\x17M߉DUXL<\x06\x10דw\u2bd5ٿw˟0)#y{\x10l\tPru\t\x19Π\u070e/f0yFmm\x1f" # noqa
+
+
+class KeyTest(unittest.TestCase):
+ def assert_keyfile_is_encrypted(self, keyfile):
+ """
+        A quick check that the given keyfile looks like an encrypted key.
+ """
+ with open(keyfile, "r") as fh:
+ self.assertEqual(
+ fh.readline()[:-1], "-----BEGIN RSA PRIVATE KEY-----"
+ )
+ self.assertEqual(fh.readline()[:-1], "Proc-Type: 4,ENCRYPTED")
+ self.assertEqual(fh.readline()[0:10], "DEK-Info: ")
+
+ def test_generate_key_bytes(self):
+ key = util.generate_key_bytes(md5, x1234, "happy birthday", 30)
+ exp = b"\x61\xE1\xF2\x72\xF4\xC1\xC4\x56\x15\x86\xBD\x32\x24\x98\xC0\xE9\x24\x67\x27\x80\xF4\x7B\xB3\x7D\xDA\x7D\x54\x01\x9E\x64" # noqa
+ self.assertEqual(exp, key)
+
+ def test_load_rsa(self):
+ key = RSAKey.from_private_key_file(_support("rsa.key"))
+ self.assertEqual("ssh-rsa", key.get_name())
+ exp_rsa = b(FINGER_RSA.split()[1].replace(":", ""))
+ my_rsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_rsa, my_rsa)
+ self.assertEqual(PUB_RSA.split()[1], key.get_base64())
+ self.assertEqual(1024, key.get_bits())
+
+ s = StringIO()
+ key.write_private_key(s)
+ self.assertEqual(RSA_PRIVATE_OUT, s.getvalue())
+ s.seek(0)
+ key2 = RSAKey.from_private_key(s)
+ self.assertEqual(key, key2)
+
+ def test_load_rsa_transmutes_crypto_exceptions(self):
+ # TODO: nix unittest for pytest
+ for exception in (TypeError("onoz"), UnsupportedAlgorithm("oops")):
+ with patch(
+ "paramiko.rsakey.serialization.load_der_private_key"
+ ) as loader:
+ loader.side_effect = exception
+ with pytest.raises(SSHException, match=str(exception)):
+ RSAKey.from_private_key_file(_support("rsa.key"))
+
+ def test_loading_empty_keys_errors_usefully(self):
+ # #1599 - raise SSHException instead of IndexError
+ with pytest.raises(SSHException, match="no lines"):
+ RSAKey.from_private_key_file(_support("blank_rsa.key"))
+
+ def test_load_rsa_password(self):
+ key = RSAKey.from_private_key_file(
+ _support("test_rsa_password.key"), "television"
+ )
+ self.assertEqual("ssh-rsa", key.get_name())
+ exp_rsa = b(FINGER_RSA.split()[1].replace(":", ""))
+ my_rsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_rsa, my_rsa)
+ self.assertEqual(PUB_RSA.split()[1], key.get_base64())
+ self.assertEqual(1024, key.get_bits())
+
+ def test_load_dss(self):
+ key = DSSKey.from_private_key_file(_support("dss.key"))
+ self.assertEqual("ssh-dss", key.get_name())
+ exp_dss = b(FINGER_DSS.split()[1].replace(":", ""))
+ my_dss = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_dss, my_dss)
+ self.assertEqual(PUB_DSS.split()[1], key.get_base64())
+ self.assertEqual(1024, key.get_bits())
+
+ s = StringIO()
+ key.write_private_key(s)
+ self.assertEqual(DSS_PRIVATE_OUT, s.getvalue())
+ s.seek(0)
+ key2 = DSSKey.from_private_key(s)
+ self.assertEqual(key, key2)
+
+ def test_load_dss_password(self):
+ key = DSSKey.from_private_key_file(
+ _support("test_dss_password.key"), "television"
+ )
+ self.assertEqual("ssh-dss", key.get_name())
+ exp_dss = b(FINGER_DSS.split()[1].replace(":", ""))
+ my_dss = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_dss, my_dss)
+ self.assertEqual(PUB_DSS.split()[1], key.get_base64())
+ self.assertEqual(1024, key.get_bits())
+
+ def test_compare_rsa(self):
+ # verify that the private & public keys compare equal
+ key = RSAKey.from_private_key_file(_support("rsa.key"))
+ self.assertEqual(key, key)
+ pub = RSAKey(data=key.asbytes())
+ self.assertTrue(key.can_sign())
+ self.assertTrue(not pub.can_sign())
+ self.assertEqual(key, pub)
+
+ def test_compare_dss(self):
+ # verify that the private & public keys compare equal
+ key = DSSKey.from_private_key_file(_support("dss.key"))
+ self.assertEqual(key, key)
+ pub = DSSKey(data=key.asbytes())
+ self.assertTrue(key.can_sign())
+ self.assertTrue(not pub.can_sign())
+ self.assertEqual(key, pub)
+
+ def _sign_and_verify_rsa(self, algorithm, saved_sig):
+ key = RSAKey.from_private_key_file(_support("rsa.key"))
+ msg = key.sign_ssh_data(b"ice weasels", algorithm)
+ assert isinstance(msg, Message)
+ msg.rewind()
+ assert msg.get_text() == algorithm
+ expected = b"".join(
+ [byte_chr(int(x, 16)) for x in saved_sig.split(":")]
+ )
+ assert msg.get_binary() == expected
+ msg.rewind()
+ pub = RSAKey(data=key.asbytes())
+ self.assertTrue(pub.verify_ssh_sig(b"ice weasels", msg))
+
+ @requires_sha1_signing
+ def test_sign_and_verify_ssh_rsa(self):
+ self._sign_and_verify_rsa("ssh-rsa", SIGNED_RSA)
+
+ def test_sign_and_verify_rsa_sha2_512(self):
+ self._sign_and_verify_rsa("rsa-sha2-512", SIGNED_RSA_512)
+
+ def test_sign_and_verify_rsa_sha2_256(self):
+ self._sign_and_verify_rsa("rsa-sha2-256", SIGNED_RSA_256)
+
+ def test_sign_dss(self):
+ # verify that the dss private key can sign and verify
+ key = DSSKey.from_private_key_file(_support("dss.key"))
+ msg = key.sign_ssh_data(b"ice weasels")
+ self.assertTrue(type(msg) is Message)
+ msg.rewind()
+ self.assertEqual("ssh-dss", msg.get_text())
+ # can't do the same test as we do for RSA, because DSS signatures
+ # are usually different each time. but we can test verification
+ # anyway so it's ok.
+ self.assertEqual(40, len(msg.get_binary()))
+ msg.rewind()
+ pub = DSSKey(data=key.asbytes())
+ self.assertTrue(pub.verify_ssh_sig(b"ice weasels", msg))
+
+ @requires_sha1_signing
+ def test_generate_rsa(self):
+ key = RSAKey.generate(1024)
+ msg = key.sign_ssh_data(b"jerri blank")
+ msg.rewind()
+ self.assertTrue(key.verify_ssh_sig(b"jerri blank", msg))
+
+ def test_generate_dss(self):
+ key = DSSKey.generate(1024)
+ msg = key.sign_ssh_data(b"jerri blank")
+ msg.rewind()
+ self.assertTrue(key.verify_ssh_sig(b"jerri blank", msg))
+
+ def test_generate_ecdsa(self):
+ key = ECDSAKey.generate()
+ msg = key.sign_ssh_data(b"jerri blank")
+ msg.rewind()
+ self.assertTrue(key.verify_ssh_sig(b"jerri blank", msg))
+ self.assertEqual(key.get_bits(), 256)
+ self.assertEqual(key.get_name(), "ecdsa-sha2-nistp256")
+
+ key = ECDSAKey.generate(bits=256)
+ msg = key.sign_ssh_data(b"jerri blank")
+ msg.rewind()
+ self.assertTrue(key.verify_ssh_sig(b"jerri blank", msg))
+ self.assertEqual(key.get_bits(), 256)
+ self.assertEqual(key.get_name(), "ecdsa-sha2-nistp256")
+
+ key = ECDSAKey.generate(bits=384)
+ msg = key.sign_ssh_data(b"jerri blank")
+ msg.rewind()
+ self.assertTrue(key.verify_ssh_sig(b"jerri blank", msg))
+ self.assertEqual(key.get_bits(), 384)
+ self.assertEqual(key.get_name(), "ecdsa-sha2-nistp384")
+
+ key = ECDSAKey.generate(bits=521)
+ msg = key.sign_ssh_data(b"jerri blank")
+ msg.rewind()
+ self.assertTrue(key.verify_ssh_sig(b"jerri blank", msg))
+ self.assertEqual(key.get_bits(), 521)
+ self.assertEqual(key.get_name(), "ecdsa-sha2-nistp521")
+
+ def test_load_ecdsa_256(self):
+ key = ECDSAKey.from_private_key_file(_support("ecdsa-256.key"))
+ self.assertEqual("ecdsa-sha2-nistp256", key.get_name())
+ exp_ecdsa = b(FINGER_ECDSA_256.split()[1].replace(":", ""))
+ my_ecdsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_ecdsa, my_ecdsa)
+ self.assertEqual(PUB_ECDSA_256.split()[1], key.get_base64())
+ self.assertEqual(256, key.get_bits())
+
+ s = StringIO()
+ key.write_private_key(s)
+ self.assertEqual(ECDSA_PRIVATE_OUT_256, s.getvalue())
+ s.seek(0)
+ key2 = ECDSAKey.from_private_key(s)
+ self.assertEqual(key, key2)
+
+ def test_load_ecdsa_password_256(self):
+ key = ECDSAKey.from_private_key_file(
+ _support("test_ecdsa_password_256.key"), b"television"
+ )
+ self.assertEqual("ecdsa-sha2-nistp256", key.get_name())
+ exp_ecdsa = b(FINGER_ECDSA_256.split()[1].replace(":", ""))
+ my_ecdsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_ecdsa, my_ecdsa)
+ self.assertEqual(PUB_ECDSA_256.split()[1], key.get_base64())
+ self.assertEqual(256, key.get_bits())
+
+ def test_compare_ecdsa_256(self):
+ # verify that the private & public keys compare equal
+ key = ECDSAKey.from_private_key_file(_support("ecdsa-256.key"))
+ self.assertEqual(key, key)
+ pub = ECDSAKey(data=key.asbytes())
+ self.assertTrue(key.can_sign())
+ self.assertTrue(not pub.can_sign())
+ self.assertEqual(key, pub)
+
+ def test_sign_ecdsa_256(self):
+        # verify that the ecdsa private key can sign and verify
+ key = ECDSAKey.from_private_key_file(_support("ecdsa-256.key"))
+ msg = key.sign_ssh_data(b"ice weasels")
+ self.assertTrue(type(msg) is Message)
+ msg.rewind()
+ self.assertEqual("ecdsa-sha2-nistp256", msg.get_text())
+ # ECDSA signatures, like DSS signatures, tend to be different
+ # each time, so we can't compare against a "known correct"
+ # signature.
+ # Even the length of the signature can change.
+
+ msg.rewind()
+ pub = ECDSAKey(data=key.asbytes())
+ self.assertTrue(pub.verify_ssh_sig(b"ice weasels", msg))
+
+ def test_load_ecdsa_384(self):
+ key = ECDSAKey.from_private_key_file(_support("test_ecdsa_384.key"))
+ self.assertEqual("ecdsa-sha2-nistp384", key.get_name())
+ exp_ecdsa = b(FINGER_ECDSA_384.split()[1].replace(":", ""))
+ my_ecdsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_ecdsa, my_ecdsa)
+ self.assertEqual(PUB_ECDSA_384.split()[1], key.get_base64())
+ self.assertEqual(384, key.get_bits())
+
+ s = StringIO()
+ key.write_private_key(s)
+ self.assertEqual(ECDSA_PRIVATE_OUT_384, s.getvalue())
+ s.seek(0)
+ key2 = ECDSAKey.from_private_key(s)
+ self.assertEqual(key, key2)
+
+ def test_load_ecdsa_password_384(self):
+ key = ECDSAKey.from_private_key_file(
+ _support("test_ecdsa_password_384.key"), b"television"
+ )
+ self.assertEqual("ecdsa-sha2-nistp384", key.get_name())
+ exp_ecdsa = b(FINGER_ECDSA_384.split()[1].replace(":", ""))
+ my_ecdsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_ecdsa, my_ecdsa)
+ self.assertEqual(PUB_ECDSA_384.split()[1], key.get_base64())
+ self.assertEqual(384, key.get_bits())
+
+ def test_load_ecdsa_transmutes_crypto_exceptions(self):
+ path = _support("ecdsa-256.key")
+ # TODO: nix unittest for pytest
+ for exception in (TypeError("onoz"), UnsupportedAlgorithm("oops")):
+ with patch(
+ "paramiko.ecdsakey.serialization.load_der_private_key"
+ ) as loader:
+ loader.side_effect = exception
+ with pytest.raises(SSHException, match=str(exception)):
+ ECDSAKey.from_private_key_file(path)
+
+ def test_compare_ecdsa_384(self):
+ # verify that the private & public keys compare equal
+ key = ECDSAKey.from_private_key_file(_support("test_ecdsa_384.key"))
+ self.assertEqual(key, key)
+ pub = ECDSAKey(data=key.asbytes())
+ self.assertTrue(key.can_sign())
+ self.assertTrue(not pub.can_sign())
+ self.assertEqual(key, pub)
+
+ def test_sign_ecdsa_384(self):
+        # verify that the ecdsa private key can sign and verify
+ key = ECDSAKey.from_private_key_file(_support("test_ecdsa_384.key"))
+ msg = key.sign_ssh_data(b"ice weasels")
+ self.assertTrue(type(msg) is Message)
+ msg.rewind()
+ self.assertEqual("ecdsa-sha2-nistp384", msg.get_text())
+ # ECDSA signatures, like DSS signatures, tend to be different
+ # each time, so we can't compare against a "known correct"
+ # signature.
+ # Even the length of the signature can change.
+
+ msg.rewind()
+ pub = ECDSAKey(data=key.asbytes())
+ self.assertTrue(pub.verify_ssh_sig(b"ice weasels", msg))
+
+ def test_load_ecdsa_521(self):
+ key = ECDSAKey.from_private_key_file(_support("test_ecdsa_521.key"))
+ self.assertEqual("ecdsa-sha2-nistp521", key.get_name())
+ exp_ecdsa = b(FINGER_ECDSA_521.split()[1].replace(":", ""))
+ my_ecdsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_ecdsa, my_ecdsa)
+ self.assertEqual(PUB_ECDSA_521.split()[1], key.get_base64())
+ self.assertEqual(521, key.get_bits())
+
+ s = StringIO()
+ key.write_private_key(s)
+ # Different versions of OpenSSL (SSLeay versions 0x1000100f and
+ # 0x1000207f for instance) use different apparently valid (as far as
+ # ssh-keygen is concerned) padding. So we can't check the actual value
+ # of the pem encoded key.
+ s.seek(0)
+ key2 = ECDSAKey.from_private_key(s)
+ self.assertEqual(key, key2)
+
+ def test_load_ecdsa_password_521(self):
+ key = ECDSAKey.from_private_key_file(
+ _support("test_ecdsa_password_521.key"), b"television"
+ )
+ self.assertEqual("ecdsa-sha2-nistp521", key.get_name())
+ exp_ecdsa = b(FINGER_ECDSA_521.split()[1].replace(":", ""))
+ my_ecdsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_ecdsa, my_ecdsa)
+ self.assertEqual(PUB_ECDSA_521.split()[1], key.get_base64())
+ self.assertEqual(521, key.get_bits())
+
+ def test_compare_ecdsa_521(self):
+ # verify that the private & public keys compare equal
+ key = ECDSAKey.from_private_key_file(_support("test_ecdsa_521.key"))
+ self.assertEqual(key, key)
+ pub = ECDSAKey(data=key.asbytes())
+ self.assertTrue(key.can_sign())
+ self.assertTrue(not pub.can_sign())
+ self.assertEqual(key, pub)
+
+ def test_sign_ecdsa_521(self):
+        # verify that the ecdsa private key can sign and verify
+ key = ECDSAKey.from_private_key_file(_support("test_ecdsa_521.key"))
+ msg = key.sign_ssh_data(b"ice weasels")
+ self.assertTrue(type(msg) is Message)
+ msg.rewind()
+ self.assertEqual("ecdsa-sha2-nistp521", msg.get_text())
+ # ECDSA signatures, like DSS signatures, tend to be different
+ # each time, so we can't compare against a "known correct"
+ # signature.
+ # Even the length of the signature can change.
+
+ msg.rewind()
+ pub = ECDSAKey(data=key.asbytes())
+ self.assertTrue(pub.verify_ssh_sig(b"ice weasels", msg))
+
+ def test_load_openssh_format_RSA_key(self):
+ key = RSAKey.from_private_key_file(
+ _support("test_rsa_openssh.key"), b"television"
+ )
+ self.assertEqual("ssh-rsa", key.get_name())
+ self.assertEqual(PUB_RSA_2K_OPENSSH.split()[1], key.get_base64())
+ self.assertEqual(2048, key.get_bits())
+ exp_rsa = b(FINGER_RSA_2K_OPENSSH.split()[1].replace(":", ""))
+ my_rsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_rsa, my_rsa)
+
+ def test_loading_openssh_RSA_keys_uses_correct_p_q(self):
+ # Re #1723 - not the most elegant test but given how deep it is...
+ with patch(
+ "paramiko.rsakey.rsa.RSAPrivateNumbers", wraps=RSAPrivateNumbers
+ ) as spy:
+ # Load key
+ RSAKey.from_private_key_file(
+ _support("test_rsa_openssh.key"), b"television"
+ )
+ # Ensure spy saw the correct P and Q values as derived from
+ # hardcoded test private key value
+ kwargs = spy.call_args[1]
+ assert kwargs["p"] == RSA_2K_OPENSSH_P
+ assert kwargs["q"] == RSA_2K_OPENSSH_Q
+
+ def test_load_openssh_format_DSS_key(self):
+ key = DSSKey.from_private_key_file(
+ _support("test_dss_openssh.key"), b"television"
+ )
+ self.assertEqual("ssh-dss", key.get_name())
+ self.assertEqual(PUB_DSS_1K_OPENSSH.split()[1], key.get_base64())
+ self.assertEqual(1024, key.get_bits())
+ exp_rsa = b(FINGER_DSS_1K_OPENSSH.split()[1].replace(":", ""))
+ my_rsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_rsa, my_rsa)
+
+ def test_load_openssh_format_EC_key(self):
+ key = ECDSAKey.from_private_key_file(
+ _support("test_ecdsa_384_openssh.key"), b"television"
+ )
+ self.assertEqual("ecdsa-sha2-nistp384", key.get_name())
+ self.assertEqual(PUB_EC_384_OPENSSH.split()[1], key.get_base64())
+ self.assertEqual(384, key.get_bits())
+ exp_fp = b(FINGER_EC_384_OPENSSH.split()[1].replace(":", ""))
+ my_fp = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_fp, my_fp)
+
+ def test_salt_size(self):
+ # Read an existing encrypted private key
+ file_ = _support("test_rsa_password.key")
+ password = "television"
+ newfile = file_ + ".new"
+ newpassword = "radio"
+ key = RSAKey(filename=file_, password=password)
+ # Write out a newly re-encrypted copy with a new password.
+ # When the bug under test exists, this will ValueError.
+ try:
+ key.write_private_key_file(newfile, password=newpassword)
+ self.assert_keyfile_is_encrypted(newfile)
+ # Verify the inner key data still matches (when no ValueError)
+ key2 = RSAKey(filename=newfile, password=newpassword)
+ self.assertEqual(key, key2)
+ finally:
+ os.remove(newfile)
+
+ def test_load_openssh_format_RSA_nopad(self):
+ # check just not exploding with 'Invalid key'
+ RSAKey.from_private_key_file(_support("test_rsa_openssh_nopad.key"))
+
+ def test_stringification(self):
+ key = RSAKey.from_private_key_file(_support("rsa.key"))
+ comparable = TEST_KEY_BYTESTR
+ self.assertEqual(str(key), comparable)
+
+ def test_ed25519(self):
+ key1 = Ed25519Key.from_private_key_file(_support("ed25519.key"))
+ key2 = Ed25519Key.from_private_key_file(
+ _support("test_ed25519_password.key"), b"abc123"
+ )
+ self.assertNotEqual(key1.asbytes(), key2.asbytes())
+
+ def test_ed25519_funky_padding(self):
+ # Proves #1306 by just not exploding with 'Invalid key'.
+ Ed25519Key.from_private_key_file(
+ _support("test_ed25519-funky-padding.key")
+ )
+
+ def test_ed25519_funky_padding_with_passphrase(self):
+ # Proves #1306 by just not exploding with 'Invalid key'.
+ Ed25519Key.from_private_key_file(
+ _support("test_ed25519-funky-padding_password.key"), b"asdf"
+ )
+
+ def test_ed25519_compare(self):
+ # verify that the private & public keys compare equal
+ key = Ed25519Key.from_private_key_file(_support("ed25519.key"))
+ self.assertEqual(key, key)
+ pub = Ed25519Key(data=key.asbytes())
+ self.assertTrue(key.can_sign())
+ self.assertTrue(not pub.can_sign())
+ self.assertEqual(key, pub)
+
+ # No point testing on systems that never exhibited the bug originally
+ @pytest.mark.skipif(
+ not is_low_entropy(), reason="Not a low-entropy system"
+ )
+ def test_ed25519_32bit_collision(self):
+ # Re: 2021.10.19 security report email: two different private keys
+ # which Paramiko compared as equal on low-entropy platforms.
+ original = Ed25519Key.from_private_key_file(
+ _support("badhash_key1.ed25519.key")
+ )
+ generated = Ed25519Key.from_private_key_file(
+ _support("badhash_key2.ed25519.key")
+ )
+ assert original != generated
+
+ def test_ed25519_nonbytes_password(self):
+ # https://github.com/paramiko/paramiko/issues/1039
+ Ed25519Key.from_private_key_file(
+ _support("test_ed25519_password.key"),
+ # NOTE: not a bytes. Amusingly, the test above for same key DOES
+ # explicitly cast to bytes...code smell!
+ "abc123",
+ )
+ # No exception -> it's good. Meh.
+
+ def test_ed25519_load_from_file_obj(self):
+ with open(_support("ed25519.key")) as pkey_fileobj:
+ key = Ed25519Key.from_private_key(pkey_fileobj)
+ self.assertEqual(key, key)
+ self.assertTrue(key.can_sign())
+
+ def test_keyfile_is_actually_encrypted(self):
+ # Read an existing encrypted private key
+ file_ = _support("test_rsa_password.key")
+ password = "television"
+ newfile = file_ + ".new"
+ newpassword = "radio"
+ key = RSAKey(filename=file_, password=password)
+ # Write out a newly re-encrypted copy with a new password.
+ # When the bug under test exists, this will ValueError.
+ try:
+ key.write_private_key_file(newfile, password=newpassword)
+ self.assert_keyfile_is_encrypted(newfile)
+ finally:
+ os.remove(newfile)
+
+ @patch("paramiko.pkey.os")
+ def _test_keyfile_race(self, os_, exists):
+ # Re: CVE-2022-24302
+ password = "television"
+ newpassword = "radio"
+ source = _support("test_ecdsa_384.key")
+ new = source + ".new"
+ # Mock setup
+ os_.path.exists.return_value = exists
+ # Attach os flag values to mock
+ for attr, value in vars(os).items():
+ if attr.startswith("O_"):
+ setattr(os_, attr, value)
+ # Load fixture key
+ key = ECDSAKey(filename=source, password=password)
+ key._write_private_key = Mock()
+ # Write out in new location
+ key.write_private_key_file(new, password=newpassword)
+ # Expected open via os module
+ os_.open.assert_called_once_with(
+ new, flags=os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode=o600
+ )
+ os_.fdopen.assert_called_once_with(os_.open.return_value, "w")
+ assert (
+ key._write_private_key.call_args[0][0]
+ == os_.fdopen.return_value.__enter__.return_value
+ )
+
+ def test_new_keyfiles_avoid_file_descriptor_race_on_chmod(self):
+ self._test_keyfile_race(exists=False)
+
+ def test_existing_keyfiles_still_work_ok(self):
+ self._test_keyfile_race(exists=True)
+
+ def test_new_keyfiles_avoid_descriptor_race_integration(self):
+ # Integration-style version of above
+ password = "television"
+ newpassword = "radio"
+ source = _support("test_ecdsa_384.key")
+ new = source + ".new"
+ # Load fixture key
+ key = ECDSAKey(filename=source, password=password)
+ try:
+ # Write out in new location
+ key.write_private_key_file(new, password=newpassword)
+ # Test mode
+ assert stat.S_IMODE(os.stat(new).st_mode) == o600
+ # Prove can open with new password
+ reloaded = ECDSAKey(filename=new, password=newpassword)
+ assert reloaded == key
+ finally:
+ if os.path.exists(new):
+ os.unlink(new)
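
A condensed sketch of the load-and-inspect pattern the key tests above repeat per algorithm; the path is a placeholder for a real or fixture private key file:

    from binascii import hexlify
    from paramiko import RSAKey

    key = RSAKey.from_private_key_file("path/to/rsa.key")  # placeholder path
    print(key.get_name())                  # e.g. "ssh-rsa"
    print(key.get_bits())                  # modulus size in bits
    print(hexlify(key.get_fingerprint()))  # MD5 fingerprint bytes, hex-encoded
    print(key.get_base64())                # public blob as found in .pub files

    # A public-only copy compares equal to the private key but cannot sign.
    pub = RSAKey(data=key.asbytes())
    assert pub == key and not pub.can_sign()
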
diff --git a/tests/test_proxy.py b/tests/test_proxy.py
new file mode 100644
index 0000000..22c2c9c
--- /dev/null
+++ b/tests/test_proxy.py
@@ -0,0 +1,150 @@
+import signal
+import socket
+
+from unittest.mock import patch
+from pytest import raises
+
+from paramiko import ProxyCommand, ProxyCommandFailure
+
+
+class TestProxyCommand:
+ @patch("paramiko.proxy.subprocess")
+ def test_init_takes_command_string(self, subprocess):
+ ProxyCommand(command_line="do a thing")
+ subprocess.Popen.assert_called_once_with(
+ ["do", "a", "thing"],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ bufsize=0,
+ )
+
+ @patch("paramiko.proxy.subprocess.Popen")
+ def test_send_writes_to_process_stdin_returning_length(self, Popen):
+ proxy = ProxyCommand("hi")
+ written = proxy.send(b"data")
+ Popen.return_value.stdin.write.assert_called_once_with(b"data")
+ assert written == len(b"data")
+
+ @patch("paramiko.proxy.subprocess.Popen")
+ def test_send_raises_ProxyCommandFailure_on_error(self, Popen):
+ Popen.return_value.stdin.write.side_effect = IOError(0, "whoops")
+ with raises(ProxyCommandFailure) as info:
+ ProxyCommand("hi").send("data")
+ assert info.value.command == "hi"
+ assert info.value.error == "whoops"
+
+ @patch("paramiko.proxy.subprocess.Popen")
+ @patch("paramiko.proxy.os.read")
+ @patch("paramiko.proxy.select")
+ def test_recv_reads_from_process_stdout_returning_bytes(
+ self, select, os_read, Popen
+ ):
+ stdout = Popen.return_value.stdout
+ select.return_value = [stdout], None, None
+ fileno = stdout.fileno.return_value
+ # Force os.read to return smaller-than-requested chunks
+ os_read.side_effect = [b"was", b"t", b"e", b"of ti", b"me"]
+ proxy = ProxyCommand("hi")
+ # Ask for 5 bytes (ie b"waste")
+ data = proxy.recv(5)
+ # Ensure we got "waste" stitched together
+ assert data == b"waste"
+ # Ensure the calls happened in the sizes expected (starting with the
+ # initial "I want all 5 bytes", followed by "I want whatever I believe
+ # should be left after what I've already read", until done)
+ assert [x[0] for x in os_read.call_args_list] == [
+ (fileno, 5), # initial
+ (fileno, 2), # I got 3, want 2 more
+ (fileno, 1), # I've now got 4, want 1 more
+ ]
+
+ @patch("paramiko.proxy.subprocess.Popen")
+ @patch("paramiko.proxy.os.read")
+ @patch("paramiko.proxy.select")
+ def test_recv_returns_buffer_on_timeout_if_any_read(
+ self, select, os_read, Popen
+ ):
+ stdout = Popen.return_value.stdout
+ select.return_value = [stdout], None, None
+ fileno = stdout.fileno.return_value
+ os_read.side_effect = [b"was", socket.timeout]
+ proxy = ProxyCommand("hi")
+ data = proxy.recv(5)
+ assert data == b"was" # not b"waste"
+ assert os_read.call_args[0] == (fileno, 2)
+
+ @patch("paramiko.proxy.subprocess.Popen")
+ @patch("paramiko.proxy.os.read")
+ @patch("paramiko.proxy.select")
+ def test_recv_raises_timeout_if_nothing_read(self, select, os_read, Popen):
+ stdout = Popen.return_value.stdout
+ select.return_value = [stdout], None, None
+ fileno = stdout.fileno.return_value
+ os_read.side_effect = socket.timeout
+ proxy = ProxyCommand("hi")
+ with raises(socket.timeout):
+ proxy.recv(5)
+ assert os_read.call_args[0] == (fileno, 5)
+
+ @patch("paramiko.proxy.subprocess.Popen")
+ @patch("paramiko.proxy.os.read")
+ @patch("paramiko.proxy.select")
+ def test_recv_raises_ProxyCommandFailure_on_non_timeout_error(
+ self, select, os_read, Popen
+ ):
+ select.return_value = [Popen.return_value.stdout], None, None
+ os_read.side_effect = IOError(0, "whoops")
+ with raises(ProxyCommandFailure) as info:
+ ProxyCommand("hi").recv(5)
+ assert info.value.command == "hi"
+ assert info.value.error == "whoops"
+
+ @patch("paramiko.proxy.subprocess.Popen")
+ @patch("paramiko.proxy.os.kill")
+ def test_close_kills_subprocess(self, os_kill, Popen):
+ proxy = ProxyCommand("hi")
+ proxy.close()
+ os_kill.assert_called_once_with(Popen.return_value.pid, signal.SIGTERM)
+
+ @patch("paramiko.proxy.subprocess.Popen")
+ def test_closed_exposes_whether_subprocess_has_exited(self, Popen):
+ proxy = ProxyCommand("hi")
+ Popen.return_value.returncode = None
+ assert proxy.closed is False
+ assert proxy._closed is False
+ Popen.return_value.returncode = 0
+ assert proxy.closed is True
+ assert proxy._closed is True
+
+ @patch("paramiko.proxy.time.time")
+ @patch("paramiko.proxy.subprocess.Popen")
+ @patch("paramiko.proxy.os.read")
+ @patch("paramiko.proxy.select")
+ def test_timeout_affects_whether_timeout_is_raised(
+ self, select, os_read, Popen, time
+ ):
+ stdout = Popen.return_value.stdout
+ select.return_value = [stdout], None, None
+ # Base case: None timeout means no timing out
+ os_read.return_value = b"meh"
+ proxy = ProxyCommand("hello")
+ assert proxy.timeout is None
+ # Implicit 'no raise' check
+ assert proxy.recv(3) == b"meh"
+ # Use settimeout to set timeout, and it is honored
+ time.side_effect = [0, 10] # elapsed > 7
+ proxy = ProxyCommand("ohnoz")
+ proxy.settimeout(7)
+ assert proxy.timeout == 7
+ with raises(socket.timeout):
+ proxy.recv(3)
+
+ @patch("paramiko.proxy.subprocess", new=None)
+ @patch("paramiko.proxy.subprocess_import_error", new=ImportError("meh"))
+ def test_raises_subprocess_ImportErrors_at_runtime(self):
+ # Not an ideal test, but I don't know of a non-bad way to fake out
+ # module-time ImportErrors. So we mock the symptoms. Meh!
+ with raises(ImportError) as info:
+ ProxyCommand("hi!!!")
+ assert str(info.value) == "meh"
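
Beyond the mocked cases above, ProxyCommand is normally handed to SSHClient.connect() as the underlying socket; a minimal sketch with placeholder hosts and proxy command:

    import paramiko

    # The subprocess started by ProxyCommand carries the raw SSH byte stream,
    # which is exactly the send()/recv() behaviour the tests mock out.
    proxy = paramiko.ProxyCommand(
        "ssh -W target.example.com:22 jumphost.example.com"  # placeholder command
    )

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    # For the sketch only; verify host keys properly in real use.
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect("target.example.com", username="user", sock=proxy)
    try:
        _, stdout, _ = client.exec_command("uptime")
        print(stdout.read().decode())
    finally:
        client.close()
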
diff --git a/tests/test_rsa.key.pub b/tests/test_rsa.key.pub
new file mode 100644
index 0000000..bfa1e15
--- /dev/null
+++ b/tests/test_rsa.key.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA049W6geFpmsljTwfvI1UmKWWJPNFI74+vNKTk4dmzkQY2yAMs6FhlvhlI8ysU4oj71ZsRYMecHbBbxdN79+JRFVYTKaLqjwGENeTd+yv4q+V2PvZv3fLnzApI3l7EJCqhWwJUHJ1jAkZzqDx0tyOL4uoZpww3nmE0kb3y21tH4c=
diff --git a/tests/test_rsa_openssh.key b/tests/test_rsa_openssh.key
new file mode 100644
index 0000000..6077c10
--- /dev/null
+++ b/tests/test_rsa_openssh.key
@@ -0,0 +1,28 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAACmFlczI1Ni1jYmMAAAAGYmNyeXB0AAAAGAAAABD0R3hOFS
+FMb2SJeo5h8QPNAAAAEAAAAAEAAAEXAAAAB3NzaC1yc2EAAAADAQABAAABAQDF+Dpr54DX
+0WdeTDpNAMdkCWEkl3OXtNgf58qlN1gX572OLBqLf0zT4bHstUEpU3piazph/rSWcUMuBo
+D46tZ6jiH7H9b9Pem2eYQWaELDDkM+v9BMbEy5rMbFRLol5OtEvPFqneyEAanPOgvd8t3y
+yhSev9QVusakzJ8j8LGgrA8huYZ+Srnw0shEWLG70KUKCh3rG0QIvA8nfhtUOisr2Gp+F0
+YxMGb5gwBlQYAYE5l6u1SjZ7hNjyNosjK+wRBFgFFBYVpkZKJgWoK9w4ijFyzMZTucnZMq
+KOKAjIJvHfKBf2/cEfYxSq1EndqTqjYsd9T7/s2vcn1OH5a0wkERAAAD0JnzCJYfDeiUQ6
+9LOAb6/NnhKvFjCdBYal60MfLcLBHvzHLJvTneQ4f1Vknq8xEVmRba7SDSfwaEybP/1FsP
+SGH6FNKA5gKllemgmcaUVr3wtNPtjX4WgsyHcwCRgHmOiyNrUj0OZR5wbZabHIIyirl4wa
+LBz8Jb3GalKEagtyWsBKDCKHCFNzh8xmsT1SWhnC7baRyC8e3krQm9hGbNhpj6Q5AtN3ql
+wBVamUp0eKxkt70mKBKI4v3DR8KqrEndeK6d0cegVEkE67fqa99a5J3uSDC8mglKrHiKEs
+dU1dh/bOF/H3aFpINlRwvlZ95Opby7rG0BHgbZONq0+VUnABVzNTM5Xd5UKjjCF28CrQBf
+XS6WeHeUx2zHtOmL1xdePk+Bii+SSUl3pLa4SDwX4nV95cSPx8vMm8dJEruxad6+MPoSuy
+Oyho89jqUTSgC/RPejuTgrnB3WbzE5SJb+V3zMata0J1wxbNfYKG9U+VucUZhP4+jzfNqH
+B/v8JqtuxnqR8NjPsK2+8wJxebL2KVNjKOm//6P3KSDsavpscGpVWOM06zUlwWCB26W3pP
+X/+xO9aR5wiBteFKoJG1waziIjqhOJSmvq+I/texUKEUd/eEFNt10Ubc0zy0sRYVN8rIRJ
+masQzCYuUylDzCa4ar1s4qngBZzWL2PRkPuXuhoHuT0J5no174GR6+6EAYZZhnq0tkYrhZ
+Ar0tQ4CDlI235a3MPHzvABuwYuWys1tBuLAb+6Gc6CmCiQ+mhojfQUBYG5T65iRFA5UQsH
+O1RLEC3yasxGcBI6d0J/fwOP/YLktNu3AeUumr0N9Xgf02DlBNwd+4GOI0LcQvl/3J8ppo
+bamTppKPEZ2d32VNEO+Z6Zx5DlIVm5gDeMvIvdwap445VnhL3ZZH2NCkAcXM9+0WH+Quas
+JCAMgPYiP9FzF+8Onmj2OmhgIVj/9eanhS3/GLrRC4xCvER2V7PwgB0I5qY110BPEttDyo
+IvYE51kvtdW447SK7HZywJnkyw2RNm+29dvWJJwSQckUHuZkXEtmEPk0ePL3yf2NH5XYJc
+pXX6Zac0KemCPIHr8l7GogE4Rb2BBTqddkegb9piz6QTAPcQnn+GuMFG06IBhUrgcMEQ8x
+UOXYUUrT5HvSxWUcgaYH1nfC3bTWmDaodw8/HQKyF6c44rujO2s2NLFOCAyQMUNdhh3lfD
+yHYLO7xYkP6xzzkpk+2lwBoeYdQdAwlKN/XqC8ZhBfwTdem/1hh1BpQJFbbFftWxU8gxxi
+iuI+vmlsuIsxKoGCq8YXuophx62lo=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/test_rsa_openssh_nopad.key b/tests/test_rsa_openssh_nopad.key
new file mode 100644
index 0000000..61ac1b1
--- /dev/null
+++ b/tests/test_rsa_openssh_nopad.key
@@ -0,0 +1,27 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAQEAnyMwWSwrbJxxQZWMJO5xR6eAA9De4t3GViqDRaQt/BgsvzZ14SUz
+aOL/A370fKxhx/JLIOOGA0o5B0/ct+CL7XFqMi5r5+iA9VcIeYKKtoAkrEvRnagNW0WVWx
+thTnE01g8Pb7fDqzI2cBuBNZ2vGNm2m4UTGC8/kl/0ES1V3KqA7lPlTrkTYg9L/ornvVHc
+c8gEbMwx9XXVRzbWiuDE176ojrudY9CZduVSOgW+HK3rKkqLBs/91jv0zUK0oqTQBLR7E2
+V2GWPDU4BjlHTtYr0jpKOGDr1DLu4+NiD/mX+tGMdH6ehbDii0kXmOUaZjs4OxuK3XA/gi
+KZLdj1jQQwAAA7iNnvAVjZ7wFQAAAAdzc2gtcnNhAAABAQCfIzBZLCtsnHFBlYwk7nFHp4
+AD0N7i3cZWKoNFpC38GCy/NnXhJTNo4v8DfvR8rGHH8ksg44YDSjkHT9y34IvtcWoyLmvn
+6ID1Vwh5goq2gCSsS9GdqA1bRZVbG2FOcTTWDw9vt8OrMjZwG4E1na8Y2babhRMYLz+SX/
+QRLVXcqoDuU+VOuRNiD0v+iue9UdxzyARszDH1ddVHNtaK4MTXvqiOu51j0Jl25VI6Bb4c
+resqSosGz/3WO/TNQrSipNAEtHsTZXYZY8NTgGOUdO1ivSOko4YOvUMu7j42IP+Zf60Yx0
+fp6FsOKLSReY5RpmOzg7G4rdcD+CIpkt2PWNBDAAAAAwEAAQAAAQEAnmMbn+VCYxth7fC2
+R5u6y6J+201sSUiKOwCdHxdFXX+CKd4+fRPVkzM6tXQKSnwX5jXVaKqLm4KoOArYl3q6Sl
+1zYParF2plz8oL+URgYzwvQ/1CaDP29zzOZptdwgESoWrj5kF0UlPrsrDtbTvAJm+qPCe6
+1XtRPpKaDO6eYr0PM2QTElZy3mDBUBvu816LdG/ZtnB9g5UsocT5mmhpHTHdjrpwNu5TBe
+ACVodDn5Fu66OlrrnQi4IPCAWKJ1YuzEkZqLhs1L3oMHACsmzrLjzW74SjY4kWTTvGiC6i
+tDoycycThk9EGLGNso99Q1fe84/OZUff7aI3yK9KvLL7oQAAAIEAh2+XrJXSBx/v9E3aJH
+ncgQH1snXr7LcSRqcWicHdbm8JsOTT3TkyXHGlSZ2rr/Y0u5V1ZSO6roJLrAHsDJzx0x0U
+xE/5mpzhD+yIKQwnWkZFLzYEnYDFdXDMzmghUIik9AW7n9dtS8UtVFGaL6Vs2YCOuLqeT9
+nZUkm3UUZ+7QIAAACBAM23DFjQ0/Op2ri7fJA2qFBdXqoJdNHuyYEIrKbB6XaaSUz52+IB
+MbccxEz3vPsHh69tZoJ+xZNbFJe9wdmbF+DQpoukHkJnzpk/pUq8LjQMzZfwv41X8zqaq4
+AOA7g27Rk8aKewhCXjhkr0hHEaSiuqIIindFaFti5sQMi2mtkXAAAAgQDGCXkpuKZK61p9
+L6G5yZSQBCgVtm0iQEbyDXWHjy/GqLtxJjqdyaRK57hXGjbzgJJraSy+sNP9uv2QOvyZvB
+3XaPWwUYVQ34WyibCqqUaPiHxX7T1lZV+asbwgbmSqYtH5dUEJ8zT572mCwxnRjX63PwDo
+5vBbR/qAW5lvRYsltQAAAAFh
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/test_rsa_password.key b/tests/test_rsa_password.key
new file mode 100644
index 0000000..7713049
--- /dev/null
+++ b/tests/test_rsa_password.key
@@ -0,0 +1,18 @@
+-----BEGIN RSA PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: DES-EDE3-CBC,DAA422E8A5A8EFB7
+
++nssHGmWl91IcmGiE6DdCIqGvAP04tuLh60wLjWBvdjtF9CjztPnF57xe+6pBk7o
+YgF/Ry3ik9ZV9rHNcRXifDKM9crxtYlpUlkM2C0SP89sXaO0P1Q1yCnrtZUwDIKO
+BNV8et5X7+AGMFsy/nmv0NFMrbpoG03Dppsloecd29NTRlIXwxHRFyHxy6BdEib/
+Dn0mEVbwg3dTvKrd/sODWR9hRwpDGM9nkEbUNJCh7vMwFKkIZZF8yqFvmGckuO5C
+HZkDJ6RkEDYrSZJAavQaiOPF5bu3cHughRfnrIKVrQuTTDiWjwX9Ny8e4p4k7dy7
+rLpbPhtxUOUbpOF7T1QxljDi1Tcq3Ebk3kN/ZLPRFnDrJfyUx+m9BXmAa78Wxs/l
+KaS8DTkYykd3+EGOeJFjZg2bvgqil4V+5JIt/+MQ5pZ/ui7i4GcH2bvZyGAbrXzP
+3LipSAdN5RG+fViLe3HUtfCx4ZAgtU78TWJrLk2FwKQGglFxKLnswp+IKZb09rZV
+uxmG4pPLUnH+mMYdiy5ugzj+5C8iZ0/IstpHVmO6GWROfedpJ82eMztTOtdhfMep
+8Z3HwAwkDtksL7Gq9klb0Wq5+uRlBWetixddAvnmqXNzYhaANWcAF/2a2Hz06Rb0
+e6pe/g0Ek5KV+6YI+D+oEblG0Sr+d4NtxtDTmIJKNVkmzlhI2s53bHp6txCb5JWJ
+S8mKLPBBBzaNXYd3odDvGXguuxUntWSsD11KyR6B9DXMIfWQW5dT7hp5kTMGlXWJ
+lD2hYab13DCCuAkwVTdpzhHYLZyxLYoSu05W6z8SAOs=
+-----END RSA PRIVATE KEY-----
diff --git a/tests/test_sftp.py b/tests/test_sftp.py
new file mode 100644
index 0000000..7fd274b
--- /dev/null
+++ b/tests/test_sftp.py
@@ -0,0 +1,832 @@
+# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+some unit tests to make sure sftp works.
+
+a real actual sftp server is contacted, and a new folder is created there to
+do test file operations in (so no existing files will be harmed).
+"""
+
+import os
+import socket
+import sys
+import warnings
+from binascii import hexlify
+from io import StringIO
+from tempfile import mkstemp
+
+import pytest
+
+from paramiko.common import o777, o600, o666, o644
+from paramiko.sftp_attr import SFTPAttributes
+from paramiko.util import b, u
+from tests import requireNonAsciiLocale
+
+from ._util import needs_builtin
+from ._util import slow
+
+
+ARTICLE = """
+Insulin sensitivity and liver insulin receptor structure in ducks from two
+genera
+
+T. Constantine, B. Chevalier, M. Derouet and J. Simon
+Station de Recherches Avicoles, Institut National de la Recherche Agronomique,
+Nouzilly, France.
+
+Insulin sensitivity and liver insulin receptor structure were studied in
+5-wk-old ducks from two genera (Muscovy and Pekin). In the fasting state, both
+duck types were equally resistant to exogenous insulin compared with chicken.
+Despite the low potency of duck insulin, the number of insulin receptors was
+lower in Muscovy duck and similar in Pekin duck and chicken liver membranes.
+After 125I-insulin cross-linking, the size of the alpha-subunit of the
+receptors from the three species was 135,000. Wheat germ agglutinin-purified
+receptors from the three species were contaminated by an active and unusual
+adenosinetriphosphatase (ATPase) contaminant (highest activity in Muscovy
+duck). Sequential purification of solubilized receptor from both duck types on
+lentil and then wheat germ agglutinin lectins led to a fraction of receptors
+very poor in ATPase activity that exhibited a beta-subunit size (95,000) and
+tyrosine kinase activity similar to those of ATPase-free chicken insulin
+receptors. Therefore the ducks from the two genera exhibit an alpha-beta-
+structure for liver insulin receptors and a clear difference in the number of
+liver insulin receptors. Their sensitivity to insulin is, however, similarly
+decreased compared with chicken.
+"""
+
+
+# Here is how unicode characters are encoded over 1 to 6 bytes in utf-8
+# U-00000000 - U-0000007F:
+# 0xxxxxxx
+# U-00000080 - U-000007FF:
+# 110xxxxx 10xxxxxx
+# U-00000800 - U-0000FFFF:
+# 1110xxxx 10xxxxxx 10xxxxxx
+# U-00010000 - U-001FFFFF:
+# 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+# U-00200000 - U-03FFFFFF:
+# 111110xx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx
+# U-04000000 - U-7FFFFFFF:
+# 1111110x 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx
+# Note that: hex(int('11000011',2)) == '0xc3'
+# Thus, the following 2-byte sequence is not valid utf8: "invalid continuation
+# byte"
+NON_UTF8_DATA = b"\xC3\xC3"
+
+unicode_folder = "\u00fcnic\u00f8de"
+utf8_folder = b"/\xc3\xbcnic\xc3\xb8\x64\x65"
+
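+# A quick, hedged illustration of the constants above (runs at import time and
+# touches nothing else): the unicode folder name round-trips through UTF-8 to
+# the raw byte form used below, while NON_UTF8_DATA cannot be decoded as UTF-8
+# because 0xC3 must be followed by a continuation byte in the 0x80-0xBF range.
+assert b"/" + unicode_folder.encode("utf-8") == utf8_folder
+try:
+    NON_UTF8_DATA.decode("utf-8")
+except UnicodeDecodeError:
+    pass  # expected: "invalid continuation byte"
+else:
+    raise AssertionError("NON_UTF8_DATA unexpectedly decoded as UTF-8")
+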
+
+@slow
+class TestSFTP:
+ def test_file(self, sftp):
+ """
+ verify that we can create a file.
+ """
+ f = sftp.open(sftp.FOLDER + "/test", "w")
+ try:
+ assert f.stat().st_size == 0
+ finally:
+ f.close()
+ sftp.remove(sftp.FOLDER + "/test")
+
+ def test_close(self, sftp):
+ """
+ Verify that SFTP session close() causes a socket error on next action.
+ """
+ sftp.close()
+ with pytest.raises(socket.error, match="Socket is closed"):
+ sftp.open(sftp.FOLDER + "/test2", "w")
+
+ def test_sftp_can_be_used_as_context_manager(self, sftp):
+ """
+ verify that the sftp session is closed when exiting the context manager
+ """
+ with sftp:
+ pass
+ with pytest.raises(socket.error, match="Socket is closed"):
+ sftp.open(sftp.FOLDER + "/test2", "w")
+
+ def test_write(self, sftp):
+ """
+ verify that a file can be created and written, and the size is correct.
+ """
+ try:
+ with sftp.open(sftp.FOLDER + "/duck.txt", "w") as f:
+ f.write(ARTICLE)
+ assert sftp.stat(sftp.FOLDER + "/duck.txt").st_size == 1486
+ finally:
+ sftp.remove(sftp.FOLDER + "/duck.txt")
+
+ def test_sftp_file_can_be_used_as_context_manager(self, sftp):
+ """
+ verify that an opened file can be used as a context manager
+ """
+ try:
+ with sftp.open(sftp.FOLDER + "/duck.txt", "w") as f:
+ f.write(ARTICLE)
+ assert sftp.stat(sftp.FOLDER + "/duck.txt").st_size == 1486
+ finally:
+ sftp.remove(sftp.FOLDER + "/duck.txt")
+
+ def test_append(self, sftp):
+ """
+ verify that a file can be opened for append, and tell() still works.
+ """
+ try:
+ with sftp.open(sftp.FOLDER + "/append.txt", "w") as f:
+ f.write("first line\nsecond line\n")
+ assert f.tell() == 23
+
+ with sftp.open(sftp.FOLDER + "/append.txt", "a+") as f:
+ f.write("third line!!!\n")
+ assert f.tell() == 37
+ assert f.stat().st_size == 37
+ f.seek(-26, f.SEEK_CUR)
+ assert f.readline() == "second line\n"
+ finally:
+ sftp.remove(sftp.FOLDER + "/append.txt")
+
+ def test_rename(self, sftp):
+ """
+ verify that renaming a file works.
+ """
+ try:
+ with sftp.open(sftp.FOLDER + "/first.txt", "w") as f:
+ f.write("content!\n")
+ sftp.rename(
+ sftp.FOLDER + "/first.txt", sftp.FOLDER + "/second.txt"
+ )
+ with pytest.raises(IOError, match="No such file"):
+ sftp.open(sftp.FOLDER + "/first.txt", "r")
+ with sftp.open(sftp.FOLDER + "/second.txt", "r") as f:
+ f.seek(-6, f.SEEK_END)
+ assert u(f.read(4)) == "tent"
+ finally:
+ # TODO: this is gross, make some sort of 'remove if possible' / 'rm
+ # -f' a-like, jeez
+ try:
+ sftp.remove(sftp.FOLDER + "/first.txt")
+ except:
+ pass
+ try:
+ sftp.remove(sftp.FOLDER + "/second.txt")
+ except:
+ pass
+
+ def testa_posix_rename(self, sftp):
+ """Test posix-rename@openssh.com protocol extension."""
+ try:
+ # first check that the normal rename works as specified
+ with sftp.open(sftp.FOLDER + "/a", "w") as f:
+ f.write("one")
+ sftp.rename(sftp.FOLDER + "/a", sftp.FOLDER + "/b")
+ with sftp.open(sftp.FOLDER + "/a", "w") as f:
+ f.write("two")
+ with pytest.raises(IOError): # actual message seems generic
+ sftp.rename(sftp.FOLDER + "/a", sftp.FOLDER + "/b")
+
+ # now check with the posix_rename
+ sftp.posix_rename(sftp.FOLDER + "/a", sftp.FOLDER + "/b")
+ with sftp.open(sftp.FOLDER + "/b", "r") as f:
+ data = u(f.read())
+ err = "Contents of renamed file not the same as original file"
+ assert "two" == data, err
+
+ finally:
+ try:
+ sftp.remove(sftp.FOLDER + "/a")
+ except:
+ pass
+ try:
+ sftp.remove(sftp.FOLDER + "/b")
+ except:
+ pass
+
+ def test_folder(self, sftp):
+ """
+ create a temporary folder, verify that we can create a file in it, then
+ remove the folder and verify that we can't create a file in it anymore.
+ """
+ sftp.mkdir(sftp.FOLDER + "/subfolder")
+ sftp.open(sftp.FOLDER + "/subfolder/test", "w").close()
+ sftp.remove(sftp.FOLDER + "/subfolder/test")
+ sftp.rmdir(sftp.FOLDER + "/subfolder")
+ # shouldn't be able to create that file if dir removed
+ with pytest.raises(IOError, match="No such file"):
+ sftp.open(sftp.FOLDER + "/subfolder/test")
+
+ def test_listdir(self, sftp):
+ """
+ verify that a folder can be created, a bunch of files can be placed in
+ it, and those files show up in sftp.listdir.
+ """
+ try:
+ sftp.open(sftp.FOLDER + "/duck.txt", "w").close()
+ sftp.open(sftp.FOLDER + "/fish.txt", "w").close()
+ sftp.open(sftp.FOLDER + "/tertiary.py", "w").close()
+
+ x = sftp.listdir(sftp.FOLDER)
+ assert len(x) == 3
+ assert "duck.txt" in x
+ assert "fish.txt" in x
+ assert "tertiary.py" in x
+ assert "random" not in x
+ finally:
+ sftp.remove(sftp.FOLDER + "/duck.txt")
+ sftp.remove(sftp.FOLDER + "/fish.txt")
+ sftp.remove(sftp.FOLDER + "/tertiary.py")
+
+ def test_listdir_iter(self, sftp):
+ """
+ listdir_iter version of above test
+ """
+ try:
+ sftp.open(sftp.FOLDER + "/duck.txt", "w").close()
+ sftp.open(sftp.FOLDER + "/fish.txt", "w").close()
+ sftp.open(sftp.FOLDER + "/tertiary.py", "w").close()
+
+ x = [x.filename for x in sftp.listdir_iter(sftp.FOLDER)]
+ assert len(x) == 3
+ assert "duck.txt" in x
+ assert "fish.txt" in x
+ assert "tertiary.py" in x
+ assert "random" not in x
+ finally:
+ sftp.remove(sftp.FOLDER + "/duck.txt")
+ sftp.remove(sftp.FOLDER + "/fish.txt")
+ sftp.remove(sftp.FOLDER + "/tertiary.py")
+
+ @requireNonAsciiLocale()
+ def test_listdir_in_locale(self, sftp):
+ """Test listdir under a locale that uses non-ascii text."""
+ sftp.open(sftp.FOLDER + "/canard.txt", "w").close()
+ try:
+ folder_contents = sftp.listdir(sftp.FOLDER)
+ assert ["canard.txt"] == folder_contents
+ finally:
+ sftp.remove(sftp.FOLDER + "/canard.txt")
+
+ def test_setstat(self, sftp):
+ """
+ verify that the setstat functions (chown, chmod, utime, truncate) work.
+ """
+ try:
+ with sftp.open(sftp.FOLDER + "/special", "w") as f:
+ f.write("x" * 1024)
+
+ stat = sftp.stat(sftp.FOLDER + "/special")
+ sftp.chmod(sftp.FOLDER + "/special", (stat.st_mode & ~o777) | o600)
+ stat = sftp.stat(sftp.FOLDER + "/special")
+ expected_mode = o600
+ if sys.platform == "win32":
+ # chmod not really functional on windows
+ expected_mode = o666
+ if sys.platform == "cygwin":
+ # even worse.
+ expected_mode = o644
+ assert stat.st_mode & o777 == expected_mode
+ assert stat.st_size == 1024
+
+ mtime = stat.st_mtime - 3600
+ atime = stat.st_atime - 1800
+ sftp.utime(sftp.FOLDER + "/special", (atime, mtime))
+ stat = sftp.stat(sftp.FOLDER + "/special")
+ assert stat.st_mtime == mtime
+ if sys.platform not in ("win32", "cygwin"):
+ assert stat.st_atime == atime
+
+ # can't really test chown, since we'd have to know a valid uid.
+
+ sftp.truncate(sftp.FOLDER + "/special", 512)
+ stat = sftp.stat(sftp.FOLDER + "/special")
+ assert stat.st_size == 512
+ finally:
+ sftp.remove(sftp.FOLDER + "/special")
+
+ def test_fsetstat(self, sftp):
+ """
+ verify that the fsetstat functions (chown, chmod, utime, truncate)
+ work on open files.
+ """
+ try:
+ with sftp.open(sftp.FOLDER + "/special", "w") as f:
+ f.write("x" * 1024)
+
+ with sftp.open(sftp.FOLDER + "/special", "r+") as f:
+ stat = f.stat()
+ f.chmod((stat.st_mode & ~o777) | o600)
+ stat = f.stat()
+
+ expected_mode = o600
+ if sys.platform == "win32":
+ # chmod not really functional on windows
+ expected_mode = o666
+ if sys.platform == "cygwin":
+ # even worse.
+ expected_mode = o644
+ assert stat.st_mode & o777 == expected_mode
+ assert stat.st_size == 1024
+
+ mtime = stat.st_mtime - 3600
+ atime = stat.st_atime - 1800
+ f.utime((atime, mtime))
+ stat = f.stat()
+ assert stat.st_mtime == mtime
+ if sys.platform not in ("win32", "cygwin"):
+ assert stat.st_atime == atime
+
+ # can't really test chown, since we'd have to know a valid uid.
+
+ f.truncate(512)
+ stat = f.stat()
+ assert stat.st_size == 512
+ finally:
+ sftp.remove(sftp.FOLDER + "/special")
+
+ def test_readline_seek(self, sftp):
+ """
+ create a text file and write a bunch of text into it. then count the
+ lines in the file, and seek around to retrieve particular lines. this
+ should verify that read buffering and 'tell' work well together, and
+ that read buffering is reset on 'seek'.
+ """
+ try:
+ with sftp.open(sftp.FOLDER + "/duck.txt", "w") as f:
+ f.write(ARTICLE)
+
+ with sftp.open(sftp.FOLDER + "/duck.txt", "r+") as f:
+ line_number = 0
+ loc = 0
+ pos_list = []
+ for line in f:
+ line_number += 1
+ pos_list.append(loc)
+ loc = f.tell()
+ assert f.seekable()
+ f.seek(pos_list[6], f.SEEK_SET)
+ assert f.readline() == "Nouzilly, France.\n"
+ f.seek(pos_list[17], f.SEEK_SET)
+ assert f.readline()[:4] == "duck"
+ f.seek(pos_list[10], f.SEEK_SET)
+ expected = "duck types were equally resistant to exogenous insulin compared with chicken.\n" # noqa
+ assert f.readline() == expected
+ finally:
+ sftp.remove(sftp.FOLDER + "/duck.txt")
+
+ def test_write_seek(self, sftp):
+ """
+ Create a text file, seek back, change it, and verify.
+ """
+ try:
+ with sftp.open(sftp.FOLDER + "/testing.txt", "w") as f:
+ f.write("hello kitty.\n")
+ f.seek(-5, f.SEEK_CUR)
+ f.write("dd")
+
+ assert sftp.stat(sftp.FOLDER + "/testing.txt").st_size == 13
+ with sftp.open(sftp.FOLDER + "/testing.txt", "r") as f:
+ data = f.read(20)
+ assert data == b"hello kiddy.\n"
+ finally:
+ sftp.remove(sftp.FOLDER + "/testing.txt")
+
+ def test_symlink(self, sftp):
+ """
+ create a symlink and then check that lstat doesn't follow it.
+ """
+ if not hasattr(os, "symlink"):
+ # skip symlink tests on windows
+ return
+
+ try:
+ with sftp.open(sftp.FOLDER + "/original.txt", "w") as f:
+ f.write("original\n")
+ sftp.symlink("original.txt", sftp.FOLDER + "/link.txt")
+ assert sftp.readlink(sftp.FOLDER + "/link.txt") == "original.txt"
+
+ with sftp.open(sftp.FOLDER + "/link.txt", "r") as f:
+ assert f.readlines() == ["original\n"]
+
+ cwd = sftp.normalize(".")
+ if cwd[-1] == "/":
+ cwd = cwd[:-1]
+ abs_path = cwd + "/" + sftp.FOLDER + "/original.txt"
+ sftp.symlink(abs_path, sftp.FOLDER + "/link2.txt")
+ assert abs_path == sftp.readlink(sftp.FOLDER + "/link2.txt")
+
+ assert sftp.lstat(sftp.FOLDER + "/link.txt").st_size == 12
+ assert sftp.stat(sftp.FOLDER + "/link.txt").st_size == 9
+ # the sftp server may be hiding extra path members from us, so the
+ # length may be longer than we expect:
+ assert sftp.lstat(sftp.FOLDER + "/link2.txt").st_size >= len(
+ abs_path
+ )
+ assert sftp.stat(sftp.FOLDER + "/link2.txt").st_size == 9
+ assert sftp.stat(sftp.FOLDER + "/original.txt").st_size == 9
+ finally:
+ try:
+ sftp.remove(sftp.FOLDER + "/link.txt")
+ except:
+ pass
+ try:
+ sftp.remove(sftp.FOLDER + "/link2.txt")
+ except:
+ pass
+ try:
+ sftp.remove(sftp.FOLDER + "/original.txt")
+ except:
+ pass
+
+ def test_flush_seek(self, sftp):
+ """
+ verify that buffered writes are automatically flushed on seek.
+ """
+ try:
+ with sftp.open(sftp.FOLDER + "/happy.txt", "w", 1) as f:
+ f.write("full line.\n")
+ f.write("partial")
+ f.seek(9, f.SEEK_SET)
+ f.write("?\n")
+
+ with sftp.open(sftp.FOLDER + "/happy.txt", "r") as f:
+ assert f.readline() == u("full line?\n")
+ assert f.read(7) == b"partial"
+ finally:
+ try:
+ sftp.remove(sftp.FOLDER + "/happy.txt")
+ except:
+ pass
+
+ def test_realpath(self, sftp):
+ """
+ test that realpath is returning something non-empty and not an
+ error.
+ """
+ pwd = sftp.normalize(".")
+ assert len(pwd) > 0
+ f = sftp.normalize("./" + sftp.FOLDER)
+ assert len(f) > 0
+ assert os.path.join(pwd, sftp.FOLDER) == f
+
+ def test_mkdir(self, sftp):
+ """
+ verify that mkdir/rmdir work.
+ """
+ sftp.mkdir(sftp.FOLDER + "/subfolder")
+ with pytest.raises(IOError): # generic msg only
+ sftp.mkdir(sftp.FOLDER + "/subfolder")
+ sftp.rmdir(sftp.FOLDER + "/subfolder")
+ with pytest.raises(IOError, match="No such file"):
+ sftp.rmdir(sftp.FOLDER + "/subfolder")
+
+ def test_chdir(self, sftp):
+ """
+ verify that chdir/getcwd work.
+ """
+ root = sftp.normalize(".")
+ if root[-1] != "/":
+ root += "/"
+ try:
+ sftp.mkdir(sftp.FOLDER + "/alpha")
+ sftp.chdir(sftp.FOLDER + "/alpha")
+ sftp.mkdir("beta")
+ assert root + sftp.FOLDER + "/alpha" == sftp.getcwd()
+ assert ["beta"] == sftp.listdir(".")
+
+ sftp.chdir("beta")
+ with sftp.open("fish", "w") as f:
+ f.write("hello\n")
+ sftp.chdir("..")
+ assert ["fish"] == sftp.listdir("beta")
+ sftp.chdir("..")
+ assert ["fish"] == sftp.listdir("alpha/beta")
+ finally:
+ sftp.chdir(root)
+ try:
+ sftp.unlink(sftp.FOLDER + "/alpha/beta/fish")
+ except:
+ pass
+ try:
+ sftp.rmdir(sftp.FOLDER + "/alpha/beta")
+ except:
+ pass
+ try:
+ sftp.rmdir(sftp.FOLDER + "/alpha")
+ except:
+ pass
+
+ def test_get_put(self, sftp):
+ """
+ verify that get/put work.
+ """
+ warnings.filterwarnings("ignore", "tempnam.*")
+
+ fd, localname = mkstemp()
+ os.close(fd)
+ text = b"All I wanted was a plastic bunny rabbit.\n"
+ with open(localname, "wb") as f:
+ f.write(text)
+ saved_progress = []
+
+ def progress_callback(x, y):
+ saved_progress.append((x, y))
+
+ sftp.put(localname, sftp.FOLDER + "/bunny.txt", progress_callback)
+
+ with sftp.open(sftp.FOLDER + "/bunny.txt", "rb") as f:
+ assert text == f.read(128)
+ assert [(41, 41)] == saved_progress
+
+ os.unlink(localname)
+ fd, localname = mkstemp()
+ os.close(fd)
+ saved_progress = []
+ sftp.get(sftp.FOLDER + "/bunny.txt", localname, progress_callback)
+
+ with open(localname, "rb") as f:
+ assert text == f.read(128)
+ assert [(41, 41)] == saved_progress
+
+ os.unlink(localname)
+ sftp.unlink(sftp.FOLDER + "/bunny.txt")
+
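+ # Hedged aside (this mirrors the calls above rather than adding API): the
+ # optional callback accepted by put()/get() is invoked as
+ # callback(bytes_transferred, total_bytes), which is why this 41-byte
+ # transfer records a single (41, 41) entry in saved_progress.
+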
+ def test_get_without_prefetch(self, sftp):
+ """
+ Create a 4MB file. Verify that get() works without prefetching
+ on a larger file.
+ """
+
+ sftp_filename = sftp.FOLDER + "/dummy_file"
+ num_chars = 1024 * 1024 * 4
+
+ fd, localname = mkstemp()
+ os.close(fd)
+
+ with open(localname, "wb") as f:
+ f.write(b"0" * num_chars)
+
+ sftp.put(localname, sftp_filename)
+
+ os.unlink(localname)
+ fd, localname = mkstemp()
+ os.close(fd)
+
+ sftp.get(sftp_filename, localname, prefetch=False)
+
+ assert os.stat(localname).st_size == num_chars
+
+ os.unlink(localname)
+ sftp.unlink(sftp_filename)
+
+ def test_check(self, sftp):
+ """
+ verify that file.check() works against our own server.
+ (it's an sftp extension that we support, and we may be the only ones who
+ support it.)
+ """
+ with sftp.open(sftp.FOLDER + "/kitty.txt", "w") as f:
+ f.write("here kitty kitty" * 64)
+
+ try:
+ with sftp.open(sftp.FOLDER + "/kitty.txt", "r") as f:
+ sum = f.check("sha1")
+ assert (
+ "91059CFC6615941378D413CB5ADAF4C5EB293402"
+ == u(hexlify(sum)).upper()
+ )
+ sum = f.check("md5", 0, 512)
+ assert (
+ "93DE4788FCA28D471516963A1FE3856A"
+ == u(hexlify(sum)).upper()
+ )
+ sum = f.check("md5", 0, 0, 510)
+ expected = "EB3B45B8CD55A0707D99B177544A319F373183D241432BB2157AB9E46358C4AC90370B5CADE5D90336FC1716F90B36D6" # noqa
+ assert u(hexlify(sum)).upper() == expected
+ finally:
+ sftp.unlink(sftp.FOLDER + "/kitty.txt")
+
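+ # Hedged note on the numbers above: SFTPFile.check(hash_algorithm, offset,
+ # length, block_size) maps to the check-file extension. With block_size left
+ # at 0 the server hashes the requested range as one blob (a single SHA-1 or
+ # MD5 digest); with block_size=510 the 1024-byte file is hashed in three
+ # blocks (510 + 510 + 4 bytes), so the result is three concatenated MD5
+ # digests -- 48 bytes, hence the 96-character hex string.
+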
+ def test_x_flag(self, sftp):
+ """
+ verify that the 'x' flag works when opening a file.
+ """
+ sftp.open(sftp.FOLDER + "/unusual.txt", "wx").close()
+
+ try:
+ with pytest.raises(IOError):
+ sftp.open(sftp.FOLDER + "/unusual.txt", "wx")
+ finally:
+ sftp.unlink(sftp.FOLDER + "/unusual.txt")
+
+ def test_utf8(self, sftp):
+ """
+ verify that unicode strings are encoded into utf8 correctly.
+ """
+ with sftp.open(sftp.FOLDER + "/something", "w") as f:
+ f.write("okay")
+ try:
+ sftp.rename(
+ sftp.FOLDER + "/something", sftp.FOLDER + "/" + unicode_folder
+ )
+ sftp.open(b(sftp.FOLDER) + utf8_folder, "r")
+ finally:
+ sftp.unlink(b(sftp.FOLDER) + utf8_folder)
+
+ def test_utf8_chdir(self, sftp):
+ sftp.mkdir(sftp.FOLDER + "/" + unicode_folder)
+ try:
+ sftp.chdir(sftp.FOLDER + "/" + unicode_folder)
+ with sftp.open("something", "w") as f:
+ f.write("okay")
+ sftp.unlink("something")
+ finally:
+ sftp.chdir()
+ sftp.rmdir(sftp.FOLDER + "/" + unicode_folder)
+
+ def test_bad_readv(self, sftp):
+ """
+ verify that readv at the end of the file doesn't explode.
+ """
+ sftp.open(sftp.FOLDER + "/zero", "w").close()
+ try:
+ with sftp.open(sftp.FOLDER + "/zero", "r") as f:
+ f.readv([(0, 12)])
+
+ with sftp.open(sftp.FOLDER + "/zero", "r") as f:
+ file_size = f.stat().st_size
+ f.prefetch(file_size)
+ f.read(100)
+ finally:
+ sftp.unlink(sftp.FOLDER + "/zero")
+
+ def test_put_without_confirm(self, sftp):
+ """
+ verify that get/put work without confirmation.
+ """
+ warnings.filterwarnings("ignore", "tempnam.*")
+
+ fd, localname = mkstemp()
+ os.close(fd)
+ text = b"All I wanted was a plastic bunny rabbit.\n"
+ with open(localname, "wb") as f:
+ f.write(text)
+ saved_progress = []
+
+ def progress_callback(x, y):
+ saved_progress.append((x, y))
+
+ res = sftp.put(
+ localname, sftp.FOLDER + "/bunny.txt", progress_callback, False
+ )
+
+ assert SFTPAttributes().attr == res.attr
+
+ with sftp.open(sftp.FOLDER + "/bunny.txt", "r") as f:
+ assert text == f.read(128)
+ assert (41, 41) == saved_progress[-1]
+
+ os.unlink(localname)
+ sftp.unlink(sftp.FOLDER + "/bunny.txt")
+
+ def test_getcwd(self, sftp):
+ """
+ verify that chdir/getcwd work.
+ """
+ assert sftp.getcwd() is None
+ root = sftp.normalize(".")
+ if root[-1] != "/":
+ root += "/"
+ try:
+ sftp.mkdir(sftp.FOLDER + "/alpha")
+ sftp.chdir(sftp.FOLDER + "/alpha")
+ assert sftp.getcwd() == "/" + sftp.FOLDER + "/alpha"
+ finally:
+ sftp.chdir(root)
+ try:
+ sftp.rmdir(sftp.FOLDER + "/alpha")
+ except:
+ pass
+
+ def test_seek_append(self, sftp):
+ """
+ verify that seek doesn't affect writes during append.
+
+ only works against paramiko's own sftp server; openssh's fails it. :(
+ """
+ try:
+ with sftp.open(sftp.FOLDER + "/append.txt", "a") as f:
+ f.write("first line\nsecond line\n")
+ f.seek(11, f.SEEK_SET)
+ f.write("third line\n")
+
+ with sftp.open(sftp.FOLDER + "/append.txt", "r") as f:
+ assert f.stat().st_size == 34
+ assert f.readline() == "first line\n"
+ assert f.readline() == "second line\n"
+ assert f.readline() == "third line\n"
+ finally:
+ sftp.remove(sftp.FOLDER + "/append.txt")
+
+ def test_putfo_empty_file(self, sftp):
+ """
+ Send an empty file and confirm it is sent.
+ """
+ target = sftp.FOLDER + "/empty file.txt"
+ stream = StringIO()
+ try:
+ attrs = sftp.putfo(stream, target)
+ # the returned attributes should not be null
+ assert attrs is not None
+ finally:
+ sftp.remove(target)
+
+ # TODO: this test doesn't actually fail if the regression (removing '%'
+ # expansion to '%%' within sftp.py's def _log()) is removed - stacktraces
+ # appear but they're clearly emitted from subthreads that have no error
+ # handling. No point running it until that is fixed somehow.
+ @pytest.mark.skip("Doesn't prove anything right now")
+ def test_file_with_percent(self, sftp):
+ """
+ verify that we can create a file with a '%' in the filename.
+ (it needs to be properly escaped by _log())
+ """
+ f = sftp.open(sftp.FOLDER + "/test%file", "w")
+ try:
+ assert f.stat().st_size == 0
+ finally:
+ f.close()
+ sftp.remove(sftp.FOLDER + "/test%file")
+
+ def test_non_utf8_data(self, sftp):
+ """Test write() and read() of non utf8 data"""
+ try:
+ with sftp.open(f"{sftp.FOLDER}/nonutf8data", "w") as f:
+ f.write(NON_UTF8_DATA)
+ with sftp.open(f"{sftp.FOLDER}/nonutf8data", "r") as f:
+ data = f.read()
+ assert data == NON_UTF8_DATA
+ with sftp.open(f"{sftp.FOLDER}/nonutf8data", "wb") as f:
+ f.write(NON_UTF8_DATA)
+ with sftp.open(f"{sftp.FOLDER}/nonutf8data", "rb") as f:
+ data = f.read()
+ assert data == NON_UTF8_DATA
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/nonutf8data")
+
+ @requireNonAsciiLocale("LC_TIME")
+ def test_sftp_attributes_locale_time(self, sftp):
+ """Test SFTPAttributes under a locale with non-ascii time strings."""
+ some_stat = os.stat(sftp.FOLDER)
+ sftp_attributes = SFTPAttributes.from_stat(some_stat, u("a_directory"))
+ assert b"a_directory" in sftp_attributes.asbytes()
+
+ def test_sftp_attributes_empty_str(self, sftp):
+ sftp_attributes = SFTPAttributes()
+ assert (
+ str(sftp_attributes)
+ == "?--------- 1 0 0 0 (unknown date) ?"
+ )
+
+ @needs_builtin("buffer")
+ def test_write_buffer(self, sftp):
+ """Test write() using a buffer instance."""
+ data = 3 * b"A potentially large block of data to chunk up.\n"
+ try:
+ with sftp.open(f"{sftp.FOLDER}/write_buffer", "wb") as f:
+ for offset in range(0, len(data), 8):
+ f.write(buffer(data, offset, 8)) # noqa
+
+ with sftp.open(f"{sftp.FOLDER}/write_buffer", "rb") as f:
+ assert f.read() == data
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/write_buffer")
+
+ @needs_builtin("memoryview")
+ def test_write_memoryview(self, sftp):
+ """Test write() using a memoryview instance."""
+ data = 3 * b"A potentially large block of data to chunk up.\n"
+ try:
+ with sftp.open(f"{sftp.FOLDER}/write_memoryview", "wb") as f:
+ view = memoryview(data)
+ for offset in range(0, len(data), 8):
+ f.write(view[offset : offset + 8])
+
+ with sftp.open(f"{sftp.FOLDER}/write_memoryview", "rb") as f:
+ assert f.read() == data
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/write_memoryview")
diff --git a/tests/test_sftp_big.py b/tests/test_sftp_big.py
new file mode 100644
index 0000000..7d1110c
--- /dev/null
+++ b/tests/test_sftp_big.py
@@ -0,0 +1,416 @@
+# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+some unit tests to make sure sftp works well with large files.
+
+a real actual sftp server is contacted, and a new folder is created there to
+do test file operations in (so no existing files will be harmed).
+"""
+
+import random
+import struct
+import sys
+import time
+
+from paramiko.common import o660
+
+from ._util import slow, wait_until
+
+
+@slow
+class TestBigSFTP:
+ def test_lots_of_files(self, sftp):
+ """
+ create a bunch of files over the same session.
+ """
+ numfiles = 100
+ try:
+ for i in range(numfiles):
+ target = f"{sftp.FOLDER}/file{i}.txt"
+ with sftp.open(target, "w", 1) as f:
+ f.write(f"this is file #{i}.\n")
+ sftp.chmod(target, o660)
+
+ # now make sure every file is there, by creating a list of filenames
+ # and reading them in random order.
+ numlist = list(range(numfiles))
+ while len(numlist) > 0:
+ r = numlist[random.randint(0, len(numlist) - 1)]
+ with sftp.open(f"{sftp.FOLDER}/file{r}.txt") as f:
+ assert f.readline() == f"this is file #{r}.\n"
+ numlist.remove(r)
+ finally:
+ for i in range(numfiles):
+ try:
+ sftp.remove(f"{sftp.FOLDER}/file{i}.txt")
+ except:
+ pass
+
+ def test_big_file(self, sftp):
+ """
+ write a 1MB file with no buffering.
+ """
+ kblob = 1024 * b"x"
+ start = time.time()
+ try:
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "w") as f:
+ for n in range(1024):
+ f.write(kblob)
+ if n % 128 == 0:
+ sys.stderr.write(".")
+ sys.stderr.write(" ")
+
+ assert (
+ sftp.stat(f"{sftp.FOLDER}/hongry.txt").st_size == 1024 * 1024
+ )
+ end = time.time()
+ sys.stderr.write(f"{round(end - start)}s")
+
+ start = time.time()
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "r") as f:
+ for n in range(1024):
+ data = f.read(1024)
+ assert data == kblob
+
+ end = time.time()
+ sys.stderr.write(f"{round(end - start)}s")
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/hongry.txt")
+
+ def test_big_file_pipelined(self, sftp):
+ """
+ write a 1MB file, with no linefeeds, using pipelining.
+ """
+ kblob = bytes().join([struct.pack(">H", n) for n in range(512)])
+ start = time.time()
+ try:
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "wb") as f:
+ f.set_pipelined(True)
+ for n in range(1024):
+ f.write(kblob)
+ if n % 128 == 0:
+ sys.stderr.write(".")
+ sys.stderr.write(" ")
+
+ assert (
+ sftp.stat(f"{sftp.FOLDER}/hongry.txt").st_size == 1024 * 1024
+ )
+ end = time.time()
+ sys.stderr.write(f"{round(end - start)}s")
+
+ start = time.time()
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "rb") as f:
+ file_size = f.stat().st_size
+ f.prefetch(file_size)
+
+ # read on odd boundaries to make sure the bytes aren't getting
+ # scrambled
+ n = 0
+ k2blob = kblob + kblob
+ chunk = 629
+ size = 1024 * 1024
+ while n < size:
+ if n + chunk > size:
+ chunk = size - n
+ data = f.read(chunk)
+ offset = n % 1024
+ assert data == k2blob[offset : offset + chunk]
+ n += chunk
+
+ end = time.time()
+ sys.stderr.write(f"{round(end - start)}s")
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/hongry.txt")
+
+ def test_prefetch_seek(self, sftp):
+ kblob = bytes().join([struct.pack(">H", n) for n in range(512)])
+ try:
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "wb") as f:
+ f.set_pipelined(True)
+ for n in range(1024):
+ f.write(kblob)
+ if n % 128 == 0:
+ sys.stderr.write(".")
+ sys.stderr.write(" ")
+
+ assert (
+ sftp.stat(f"{sftp.FOLDER}/hongry.txt").st_size == 1024 * 1024
+ )
+
+ start = time.time()
+ k2blob = kblob + kblob
+ chunk = 793
+ for i in range(10):
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "rb") as f:
+ file_size = f.stat().st_size
+ f.prefetch(file_size)
+ base_offset = (512 * 1024) + 17 * random.randint(
+ 1000, 2000
+ )
+ offsets = [base_offset + j * chunk for j in range(100)]
+ # randomly seek around and read them out
+ for j in range(100):
+ offset = offsets[random.randint(0, len(offsets) - 1)]
+ offsets.remove(offset)
+ f.seek(offset)
+ data = f.read(chunk)
+ n_offset = offset % 1024
+ assert data == k2blob[n_offset : n_offset + chunk]
+ offset += chunk
+ end = time.time()
+ sys.stderr.write(f"{round(end - start)}s")
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/hongry.txt")
+
+ def test_readv_seek(self, sftp):
+ kblob = bytes().join([struct.pack(">H", n) for n in range(512)])
+ try:
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "wb") as f:
+ f.set_pipelined(True)
+ for n in range(1024):
+ f.write(kblob)
+ if n % 128 == 0:
+ sys.stderr.write(".")
+ sys.stderr.write(" ")
+
+ assert (
+ sftp.stat(f"{sftp.FOLDER}/hongry.txt").st_size == 1024 * 1024
+ )
+
+ start = time.time()
+ k2blob = kblob + kblob
+ chunk = 793
+ for i in range(10):
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "rb") as f:
+ base_offset = (512 * 1024) + 17 * random.randint(
+ 1000, 2000
+ )
+ # make a bunch of offsets and put them in random order
+ offsets = [base_offset + j * chunk for j in range(100)]
+ readv_list = []
+ for j in range(100):
+ o = offsets[random.randint(0, len(offsets) - 1)]
+ offsets.remove(o)
+ readv_list.append((o, chunk))
+ ret = f.readv(readv_list)
+ for i in range(len(readv_list)):
+ offset = readv_list[i][0]
+ n_offset = offset % 1024
+ assert next(ret) == k2blob[n_offset : n_offset + chunk]
+ end = time.time()
+ sys.stderr.write(f"{round(end - start)}s")
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/hongry.txt")
+
+ def test_lots_of_prefetching(self, sftp):
+ """
+ prefetch a 1MB file a bunch of times, discarding the file object
+ without using it, to verify that paramiko doesn't get confused.
+ """
+ kblob = 1024 * b"x"
+ try:
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "w") as f:
+ f.set_pipelined(True)
+ for n in range(1024):
+ f.write(kblob)
+ if n % 128 == 0:
+ sys.stderr.write(".")
+ sys.stderr.write(" ")
+
+ assert (
+ sftp.stat(f"{sftp.FOLDER}/hongry.txt").st_size == 1024 * 1024
+ )
+
+ for i in range(10):
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "r") as f:
+ file_size = f.stat().st_size
+ f.prefetch(file_size)
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "r") as f:
+ file_size = f.stat().st_size
+ f.prefetch(file_size)
+ for n in range(1024):
+ data = f.read(1024)
+ assert data == kblob
+ if n % 128 == 0:
+ sys.stderr.write(".")
+ sys.stderr.write(" ")
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/hongry.txt")
+
+ def test_prefetch_readv(self, sftp):
+ """
+ verify that prefetch and readv don't conflict with each other.
+ """
+ kblob = bytes().join([struct.pack(">H", n) for n in range(512)])
+ try:
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "wb") as f:
+ f.set_pipelined(True)
+ for n in range(1024):
+ f.write(kblob)
+ if n % 128 == 0:
+ sys.stderr.write(".")
+ sys.stderr.write(" ")
+
+ assert (
+ sftp.stat(f"{sftp.FOLDER}/hongry.txt").st_size == 1024 * 1024
+ )
+
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "rb") as f:
+ file_size = f.stat().st_size
+ f.prefetch(file_size)
+ data = f.read(1024)
+ assert data == kblob
+
+ chunk_size = 793
+ base_offset = 512 * 1024
+ k2blob = kblob + kblob
+ chunks = [
+ (base_offset + (chunk_size * i), chunk_size)
+ for i in range(20)
+ ]
+ for data in f.readv(chunks):
+ offset = base_offset % 1024
+ assert chunk_size == len(data)
+ assert k2blob[offset : offset + chunk_size] == data
+ base_offset += chunk_size
+
+ sys.stderr.write(" ")
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/hongry.txt")
+
+ def test_large_readv(self, sftp):
+ """
+ verify that a very large readv is broken up correctly and still
+ returned as a single blob.
+ """
+ kblob = bytes().join([struct.pack(">H", n) for n in range(512)])
+ try:
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "wb") as f:
+ f.set_pipelined(True)
+ for n in range(1024):
+ f.write(kblob)
+ if n % 128 == 0:
+ sys.stderr.write(".")
+ sys.stderr.write(" ")
+
+ assert (
+ sftp.stat(f"{sftp.FOLDER}/hongry.txt").st_size == 1024 * 1024
+ )
+
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "rb") as f:
+ data = list(f.readv([(23 * 1024, 128 * 1024)]))
+ assert len(data) == 1
+ data = data[0]
+ assert len(data) == 128 * 1024
+
+ sys.stderr.write(" ")
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/hongry.txt")
+
+ def test_big_file_big_buffer(self, sftp):
+ """
+ write a 1MB file, with no linefeeds, and a big buffer.
+ """
+ mblob = 1024 * 1024 * "x"
+ try:
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "w", 128 * 1024) as f:
+ f.write(mblob)
+
+ assert (
+ sftp.stat(f"{sftp.FOLDER}/hongry.txt").st_size == 1024 * 1024
+ )
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/hongry.txt")
+
+ def test_big_file_renegotiate(self, sftp):
+ """
+ write a 1MB file, forcing key renegotiation in the middle.
+ """
+ t = sftp.sock.get_transport()
+ t.packetizer.REKEY_BYTES = 512 * 1024
+ k32blob = 32 * 1024 * "x"
+ try:
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "w", 128 * 1024) as f:
+ for i in range(32):
+ f.write(k32blob)
+
+ assert (
+ sftp.stat(f"{sftp.FOLDER}/hongry.txt").st_size == 1024 * 1024
+ )
+ assert t.H != t.session_id
+
+ # try to read it too.
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "r", 128 * 1024) as f:
+ file_size = f.stat().st_size
+ f.prefetch(file_size)
+ total = 0
+ while total < 1024 * 1024:
+ total += len(f.read(32 * 1024))
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/hongry.txt")
+ t.packetizer.REKEY_BYTES = pow(2, 30)
+
+ def test_prefetch_limit(self, sftp):
+ """
+ write a 1MB file and prefetch with a limit
+ """
+ kblob = 1024 * b"x"
+ start = time.time()
+
+ def expect_prefetch_extents(file, expected_extents):
+ with file._prefetch_lock:
+ assert len(file._prefetch_extents) == expected_extents
+
+ try:
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "w") as f:
+ for n in range(1024):
+ f.write(kblob)
+ if n % 128 == 0:
+ sys.stderr.write(".")
+ sys.stderr.write(" ")
+
+ assert (
+ sftp.stat(f"{sftp.FOLDER}/hongry.txt").st_size == 1024 * 1024
+ )
+ end = time.time()
+ sys.stderr.write(f"{round(end - start)}s")
+
+ # read with prefetch, no limit
+ # expecting 32 requests (32k * 32 == 1M)
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "rb") as f:
+ file_size = f.stat().st_size
+ f.prefetch(file_size)
+ wait_until(lambda: expect_prefetch_extents(f, 32))
+
+ # read with prefetch, limiting to 5 simultaneous requests
+ with sftp.open(f"{sftp.FOLDER}/hongry.txt", "rb") as f:
+ file_size = f.stat().st_size
+ f.prefetch(file_size, 5)
+ wait_until(lambda: expect_prefetch_extents(f, 5))
+ for n in range(1024):
+ with f._prefetch_lock:
+ assert len(f._prefetch_extents) <= 5
+ data = f.read(1024)
+ assert data == kblob
+
+ if n % 128 == 0:
+ sys.stderr.write(".")
+
+ finally:
+ sftp.remove(f"{sftp.FOLDER}/hongry.txt")
diff --git a/tests/test_ssh_exception.py b/tests/test_ssh_exception.py
new file mode 100644
index 0000000..1628986
--- /dev/null
+++ b/tests/test_ssh_exception.py
@@ -0,0 +1,75 @@
+import pickle
+import unittest
+
+from paramiko import RSAKey
+from paramiko.ssh_exception import (
+ NoValidConnectionsError,
+ BadAuthenticationType,
+ PartialAuthentication,
+ ChannelException,
+ BadHostKeyException,
+ ProxyCommandFailure,
+)
+
+
+class NoValidConnectionsErrorTest(unittest.TestCase):
+ def test_pickling(self):
+ # Regression test for https://github.com/paramiko/paramiko/issues/617
+ exc = NoValidConnectionsError({("127.0.0.1", "22"): Exception()})
+ new_exc = pickle.loads(pickle.dumps(exc))
+ self.assertEqual(type(exc), type(new_exc))
+ self.assertEqual(str(exc), str(new_exc))
+ self.assertEqual(exc.args, new_exc.args)
+
+ def test_error_message_for_single_host(self):
+ exc = NoValidConnectionsError({("127.0.0.1", "22"): Exception()})
+ assert "Unable to connect to port 22 on 127.0.0.1" in str(exc)
+
+ def test_error_message_for_two_hosts(self):
+ exc = NoValidConnectionsError(
+ {("127.0.0.1", "22"): Exception(), ("::1", "22"): Exception()}
+ )
+ assert "Unable to connect to port 22 on 127.0.0.1 or ::1" in str(exc)
+
+ def test_error_message_for_multiple_hosts(self):
+ exc = NoValidConnectionsError(
+ {
+ ("127.0.0.1", "22"): Exception(),
+ ("::1", "22"): Exception(),
+ ("10.0.0.42", "22"): Exception(),
+ }
+ )
+ exp = "Unable to connect to port 22 on 10.0.0.42, 127.0.0.1 or ::1"
+ assert exp in str(exc)
+
+
+class ExceptionStringDisplayTest(unittest.TestCase):
+ def test_BadAuthenticationType(self):
+ exc = BadAuthenticationType(
+ "Bad authentication type", ["ok", "also-ok"]
+ )
+ expected = "Bad authentication type; allowed types: ['ok', 'also-ok']"
+ assert str(exc) == expected
+
+ def test_PartialAuthentication(self):
+ exc = PartialAuthentication(["ok", "also-ok"])
+ expected = "Partial authentication; allowed types: ['ok', 'also-ok']"
+ assert str(exc) == expected
+
+ def test_BadHostKeyException(self):
+ got_key = RSAKey.generate(2048)
+ wanted_key = RSAKey.generate(2048)
+ exc = BadHostKeyException("myhost", got_key, wanted_key)
+ expected = "Host key for server 'myhost' does not match: got '{}', expected '{}'" # noqa
+ assert str(exc) == expected.format(
+ got_key.get_base64(), wanted_key.get_base64()
+ )
+
+ def test_ProxyCommandFailure(self):
+ exc = ProxyCommandFailure("man squid", 7)
+ expected = 'ProxyCommand("man squid") returned nonzero exit status: 7'
+ assert str(exc) == expected
+
+ def test_ChannelException(self):
+ exc = ChannelException(17, "whatever")
+ assert str(exc) == "ChannelException(17, 'whatever')"
diff --git a/tests/test_ssh_gss.py b/tests/test_ssh_gss.py
new file mode 100644
index 0000000..b441a22
--- /dev/null
+++ b/tests/test_ssh_gss.py
@@ -0,0 +1,160 @@
+# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+# Copyright (C) 2013-2014 science + computing ag
+# Author: Sebastian Deiss <sebastian.deiss@t-online.de>
+#
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Unit Tests for the GSS-API / SSPI SSHv2 Authentication (gssapi-with-mic)
+"""
+
+import socket
+import threading
+
+import paramiko
+
+from ._util import _support, needs_gssapi, KerberosTestCase, update_env
+from .test_client import FINGERPRINTS
+
+
+class NullServer(paramiko.ServerInterface):
+ def get_allowed_auths(self, username):
+ return "gssapi-with-mic,publickey"
+
+ def check_auth_gssapi_with_mic(
+ self, username, gss_authenticated=paramiko.AUTH_FAILED, cc_file=None
+ ):
+ if gss_authenticated == paramiko.AUTH_SUCCESSFUL:
+ return paramiko.AUTH_SUCCESSFUL
+ return paramiko.AUTH_FAILED
+
+ def enable_auth_gssapi(self):
+ return True
+
+ def check_auth_publickey(self, username, key):
+ try:
+ expected = FINGERPRINTS[key.get_name()]
+ except KeyError:
+ return paramiko.AUTH_FAILED
+ else:
+ if key.get_fingerprint() == expected:
+ return paramiko.AUTH_SUCCESSFUL
+ return paramiko.AUTH_FAILED
+
+ def check_channel_request(self, kind, chanid):
+ return paramiko.OPEN_SUCCEEDED
+
+ def check_channel_exec_request(self, channel, command):
+ if command != b"yes":
+ return False
+ return True
+
+
+@needs_gssapi
+class GSSAuthTest(KerberosTestCase):
+ def setUp(self):
+ # TODO: username and targ_name should come from os.environ or whatever
+ # the approved pytest method is for runtime-configuring test data.
+ self.username = self.realm.user_princ
+ self.hostname = socket.getfqdn(self.realm.hostname)
+ self.sockl = socket.socket()
+ self.sockl.bind((self.realm.hostname, 0))
+ self.sockl.listen(1)
+ self.addr, self.port = self.sockl.getsockname()
+ self.event = threading.Event()
+ update_env(self, self.realm.env)
+ thread = threading.Thread(target=self._run)
+ thread.start()
+
+ def tearDown(self):
+ for attr in "tc ts socks sockl".split():
+ if hasattr(self, attr):
+ getattr(self, attr).close()
+
+ def _run(self):
+ self.socks, addr = self.sockl.accept()
+ self.ts = paramiko.Transport(self.socks)
+ host_key = paramiko.RSAKey.from_private_key_file(_support("rsa.key"))
+ self.ts.add_server_key(host_key)
+ server = NullServer()
+ self.ts.start_server(self.event, server)
+
+ def _test_connection(self, **kwargs):
+ """
+ (Most) kwargs get passed directly into SSHClient.connect().
+
+ There are currently no exceptions to this.
+ """
+ host_key = paramiko.RSAKey.from_private_key_file(_support("rsa.key"))
+ public_host_key = paramiko.RSAKey(data=host_key.asbytes())
+
+ self.tc = paramiko.SSHClient()
+ self.tc.set_missing_host_key_policy(paramiko.WarningPolicy())
+ self.tc.get_host_keys().add(
+ f"[{self.addr}]:{self.port}", "ssh-rsa", public_host_key
+ )
+ self.tc.connect(
+ hostname=self.addr,
+ port=self.port,
+ username=self.username,
+ gss_host=self.hostname,
+ gss_auth=True,
+ **kwargs,
+ )
+
+ self.event.wait(1.0)
+ self.assertTrue(self.event.is_set())
+ self.assertTrue(self.ts.is_active())
+ self.assertEqual(self.username, self.ts.get_username())
+ self.assertEqual(True, self.ts.is_authenticated())
+
+ stdin, stdout, stderr = self.tc.exec_command("yes")
+ schan = self.ts.accept(1.0)
+
+ schan.send("Hello there.\n")
+ schan.send_stderr("This is on stderr.\n")
+ schan.close()
+
+ self.assertEqual("Hello there.\n", stdout.readline())
+ self.assertEqual("", stdout.readline())
+ self.assertEqual("This is on stderr.\n", stderr.readline())
+ self.assertEqual("", stderr.readline())
+
+ stdin.close()
+ stdout.close()
+ stderr.close()
+
+ def test_gss_auth(self):
+ """
+ Verify that Paramiko can handle SSHv2 GSS-API / SSPI authentication
+ (gssapi-with-mic) in client and server mode.
+ """
+ self._test_connection(allow_agent=False, look_for_keys=False)
+
+ def test_auth_trickledown(self):
+ """
+ Failed gssapi-with-mic doesn't prevent subsequent key from succeeding
+ """
+ self.hostname = (
+ "this_host_does_not_exists_and_causes_a_GSSAPI-exception"
+ )
+ self._test_connection(
+ key_filename=[_support("rsa.key")],
+ allow_agent=False,
+ look_for_keys=False,
+ )
diff --git a/tests/test_transport.py b/tests/test_transport.py
new file mode 100644
index 0000000..67e2eb4
--- /dev/null
+++ b/tests/test_transport.py
@@ -0,0 +1,1446 @@
+# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Some unit tests for the ssh2 protocol in Transport.
+"""
+
+
+from binascii import hexlify
+import itertools
+import select
+import socket
+import time
+import threading
+import random
+import sys
+import unittest
+from unittest.mock import Mock
+
+from paramiko import (
+ AuthHandler,
+ ChannelException,
+ IncompatiblePeer,
+ MessageOrderError,
+ Packetizer,
+ RSAKey,
+ SSHException,
+ SecurityOptions,
+ ServiceRequestingTransport,
+ Transport,
+)
+from paramiko.auth_handler import AuthOnlyHandler
+from paramiko import OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
+from paramiko.common import (
+ DEFAULT_MAX_PACKET_SIZE,
+ DEFAULT_WINDOW_SIZE,
+ MAX_WINDOW_SIZE,
+ MIN_PACKET_SIZE,
+ MIN_WINDOW_SIZE,
+ MSG_CHANNEL_OPEN,
+ MSG_DEBUG,
+ MSG_IGNORE,
+ MSG_KEXINIT,
+ MSG_UNIMPLEMENTED,
+ MSG_USERAUTH_SUCCESS,
+ byte_chr,
+ cMSG_CHANNEL_WINDOW_ADJUST,
+ cMSG_UNIMPLEMENTED,
+)
+from paramiko.message import Message
+
+from ._util import (
+ needs_builtin,
+ _support,
+ requires_sha1_signing,
+ slow,
+ server,
+ _disable_sha2,
+ _disable_sha1,
+ TestServer as NullServer,
+)
+from ._loop import LoopSocket
+from pytest import mark, raises
+
+
+LONG_BANNER = """\
+Welcome to the super-fun-land BBS, where our MOTD is the primary thing we
+provide. All rights reserved. Offer void in Tennessee. Stunt drivers were
+used. Do not attempt at home. Some restrictions apply.
+
+Happy birthday to Commie the cat!
+
+Note: An SSH banner may eventually appear.
+
+Maybe.
+"""
+
+# Faux 'packet type' we do not implement and are unlikely ever to (but which is
+# technically "within spec" per RFC 4251).
+MSG_FUGGEDABOUTIT = 253
+
+
+class TransportTest(unittest.TestCase):
+ # TODO: this can get nuked once ServiceRequestingTransport becomes the
+ # only Transport, as it has this baked in.
+ _auth_handler_class = AuthHandler
+
+ def setUp(self):
+ self.socks = LoopSocket()
+ self.sockc = LoopSocket()
+ self.sockc.link(self.socks)
+ self.tc = Transport(self.sockc)
+ self.ts = Transport(self.socks)
+
+ def tearDown(self):
+ self.tc.close()
+ self.ts.close()
+ self.socks.close()
+ self.sockc.close()
+
+ # TODO: unify with newer contextmanager
+ def setup_test_server(
+ self, client_options=None, server_options=None, connect_kwargs=None
+ ):
+ host_key = RSAKey.from_private_key_file(_support("rsa.key"))
+ public_host_key = RSAKey(data=host_key.asbytes())
+ self.ts.add_server_key(host_key)
+
+ if client_options is not None:
+ client_options(self.tc.get_security_options())
+ if server_options is not None:
+ server_options(self.ts.get_security_options())
+
+ event = threading.Event()
+ self.server = NullServer()
+ self.assertTrue(not event.is_set())
+ self.ts.start_server(event, self.server)
+ if connect_kwargs is None:
+ connect_kwargs = dict(
+ hostkey=public_host_key,
+ username="slowdive",
+ password="pygmalion",
+ )
+ self.tc.connect(**connect_kwargs)
+ event.wait(1.0)
+ self.assertTrue(event.is_set())
+ self.assertTrue(self.ts.is_active())
+
+ def test_security_options(self):
+ o = self.tc.get_security_options()
+ self.assertEqual(type(o), SecurityOptions)
+ self.assertTrue(("aes256-cbc", "aes192-cbc") != o.ciphers)
+ o.ciphers = ("aes256-cbc", "aes192-cbc")
+ self.assertEqual(("aes256-cbc", "aes192-cbc"), o.ciphers)
+ try:
+ o.ciphers = ("aes256-cbc", "made-up-cipher")
+ self.assertTrue(False)
+ except ValueError:
+ pass
+ try:
+ o.ciphers = 23
+ self.assertTrue(False)
+ except TypeError:
+ pass
+
+ def testb_security_options_reset(self):
+ o = self.tc.get_security_options()
+ # should not throw any exceptions
+ o.ciphers = o.ciphers
+ o.digests = o.digests
+ o.key_types = o.key_types
+ o.kex = o.kex
+ o.compression = o.compression
+
+ def test_compute_key(self):
+ self.tc.K = 123281095979686581523377256114209720774539068973101330872763622971399429481072519713536292772709507296759612401802191955568143056534122385270077606457721553469730659233569339356140085284052436697480759510519672848743794433460113118986816826624865291116513647975790797391795651716378444844877749505443714557929 # noqa
+ self.tc.H = b"\x0C\x83\x07\xCD\xE6\x85\x6F\xF3\x0B\xA9\x36\x84\xEB\x0F\x04\xC2\x52\x0E\x9E\xD3" # noqa
+ self.tc.session_id = self.tc.H
+ key = self.tc._compute_key("C", 32)
+ self.assertEqual(
+ b"207E66594CA87C44ECCBA3B3CD39FDDB378E6FDB0F97C54B2AA0CFBF900CD995", # noqa
+ hexlify(key).upper(),
+ )
+
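+ # Hedged sketch of the derivation being checked (RFC 4253 section 7.2; SHA-1
+ # applies here only because no kex has actually run in this unit test): the
+ # first block is HASH(mpint(K) || H || "C" || session_id), where "C" selects
+ # the client-to-server encryption key, and further blocks of
+ # HASH(mpint(K) || H || output_so_far) are appended until 32 bytes are
+ # available, then the result is truncated to the requested length.
+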
+ def test_simple(self):
+ """
+ verify that we can establish an ssh link with ourselves across the
+ loopback sockets. this is hardly "simple" but it's simpler than the
+ later tests. :)
+ """
+ host_key = RSAKey.from_private_key_file(_support("rsa.key"))
+ public_host_key = RSAKey(data=host_key.asbytes())
+ self.ts.add_server_key(host_key)
+ event = threading.Event()
+ server = NullServer()
+ self.assertTrue(not event.is_set())
+ self.assertEqual(None, self.tc.get_username())
+ self.assertEqual(None, self.ts.get_username())
+ self.assertEqual(False, self.tc.is_authenticated())
+ self.assertEqual(False, self.ts.is_authenticated())
+ self.ts.start_server(event, server)
+ self.tc.connect(
+ hostkey=public_host_key, username="slowdive", password="pygmalion"
+ )
+ event.wait(1.0)
+ self.assertTrue(event.is_set())
+ self.assertTrue(self.ts.is_active())
+ self.assertEqual("slowdive", self.tc.get_username())
+ self.assertEqual("slowdive", self.ts.get_username())
+ self.assertEqual(True, self.tc.is_authenticated())
+ self.assertEqual(True, self.ts.is_authenticated())
+
+ def test_long_banner(self):
+ """
+ verify that a long banner doesn't mess up the handshake.
+ """
+ host_key = RSAKey.from_private_key_file(_support("rsa.key"))
+ public_host_key = RSAKey(data=host_key.asbytes())
+ self.ts.add_server_key(host_key)
+ event = threading.Event()
+ server = NullServer()
+ self.assertTrue(not event.is_set())
+ self.socks.send(LONG_BANNER)
+ self.ts.start_server(event, server)
+ self.tc.connect(
+ hostkey=public_host_key, username="slowdive", password="pygmalion"
+ )
+ event.wait(1.0)
+ self.assertTrue(event.is_set())
+ self.assertTrue(self.ts.is_active())
+
+ def test_special(self):
+ """
+ verify that the client can demand odd handshake settings, and can
+ renegotiate keys in mid-stream.
+ """
+
+ def force_algorithms(options):
+ options.ciphers = ("aes256-cbc",)
+ options.digests = ("hmac-md5-96",)
+
+ self.setup_test_server(client_options=force_algorithms)
+ self.assertEqual("aes256-cbc", self.tc.local_cipher)
+ self.assertEqual("aes256-cbc", self.tc.remote_cipher)
+ self.assertEqual(12, self.tc.packetizer.get_mac_size_out())
+ self.assertEqual(12, self.tc.packetizer.get_mac_size_in())
+
+ self.tc.send_ignore(1024)
+ self.tc.renegotiate_keys()
+ self.ts.send_ignore(1024)
+
+ @slow
+ def test_keepalive(self):
+ """
+ verify that the keepalive will be sent.
+ """
+ self.setup_test_server()
+ self.assertEqual(None, getattr(self.server, "_global_request", None))
+ self.tc.set_keepalive(1)
+ time.sleep(2)
+ self.assertEqual("keepalive@lag.net", self.server._global_request)
+
+ def test_exec_command(self):
+ """
+ verify that exec_command() does something reasonable.
+ """
+ self.setup_test_server()
+
+ chan = self.tc.open_session()
+ schan = self.ts.accept(1.0)
+ try:
+ chan.exec_command(
+ b"command contains \xfc and is not a valid UTF-8 string"
+ )
+ self.assertTrue(False)
+ except SSHException:
+ pass
+
+ chan = self.tc.open_session()
+ chan.exec_command("yes")
+ schan = self.ts.accept(1.0)
+ schan.send("Hello there.\n")
+ schan.send_stderr("This is on stderr.\n")
+ schan.close()
+
+ f = chan.makefile()
+ self.assertEqual("Hello there.\n", f.readline())
+ self.assertEqual("", f.readline())
+ f = chan.makefile_stderr()
+ self.assertEqual("This is on stderr.\n", f.readline())
+ self.assertEqual("", f.readline())
+
+ # now try it with combined stdout/stderr
+ chan = self.tc.open_session()
+ chan.exec_command("yes")
+ schan = self.ts.accept(1.0)
+ schan.send("Hello there.\n")
+ schan.send_stderr("This is on stderr.\n")
+ schan.close()
+
+ chan.set_combine_stderr(True)
+ f = chan.makefile()
+ self.assertEqual("Hello there.\n", f.readline())
+ self.assertEqual("This is on stderr.\n", f.readline())
+ self.assertEqual("", f.readline())
+
+ def test_channel_can_be_used_as_context_manager(self):
+ """
+ verify that a Channel can be used as a context manager.
+ """
+ self.setup_test_server()
+
+ with self.tc.open_session() as chan:
+ with self.ts.accept(1.0) as schan:
+ chan.exec_command("yes")
+ schan.send("Hello there.\n")
+ schan.close()
+
+ f = chan.makefile()
+ self.assertEqual("Hello there.\n", f.readline())
+ self.assertEqual("", f.readline())
+
+ def test_invoke_shell(self):
+ """
+ verify that invoke_shell() does something reasonable.
+ """
+ self.setup_test_server()
+ chan = self.tc.open_session()
+ chan.invoke_shell()
+ schan = self.ts.accept(1.0)
+ chan.send("communist j. cat\n")
+ f = schan.makefile()
+ self.assertEqual("communist j. cat\n", f.readline())
+ chan.close()
+ self.assertEqual("", f.readline())
+
+ def test_channel_exception(self):
+ """
+ verify that ChannelException is thrown for a bad open-channel request.
+ """
+ self.setup_test_server()
+ try:
+ self.tc.open_channel("bogus")
+ self.fail("expected exception")
+ except ChannelException as e:
+ self.assertTrue(e.code == OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED)
+
+ def test_exit_status(self):
+ """
+ verify that get_exit_status() works.
+ """
+ self.setup_test_server()
+
+ chan = self.tc.open_session()
+ schan = self.ts.accept(1.0)
+ chan.exec_command("yes")
+ schan.send("Hello there.\n")
+ self.assertTrue(not chan.exit_status_ready())
+ # trigger an EOF
+ schan.shutdown_read()
+ schan.shutdown_write()
+ schan.send_exit_status(23)
+ schan.close()
+
+ f = chan.makefile()
+ self.assertEqual("Hello there.\n", f.readline())
+ self.assertEqual("", f.readline())
+ count = 0
+ while not chan.exit_status_ready():
+ time.sleep(0.1)
+ count += 1
+ if count > 50:
+ raise Exception("timeout")
+ self.assertEqual(23, chan.recv_exit_status())
+ chan.close()
+
+ def test_select(self):
+ """
+ verify that select() on a channel works.
+ """
+ self.setup_test_server()
+ chan = self.tc.open_session()
+ chan.invoke_shell()
+ schan = self.ts.accept(1.0)
+
+ # nothing should be ready
+ r, w, e = select.select([chan], [], [], 0.1)
+ self.assertEqual([], r)
+ self.assertEqual([], w)
+ self.assertEqual([], e)
+
+ schan.send("hello\n")
+
+ # something should be ready now (give it 1 second to appear)
+ for i in range(10):
+ r, w, e = select.select([chan], [], [], 0.1)
+ if chan in r:
+ break
+ time.sleep(0.1)
+ self.assertEqual([chan], r)
+ self.assertEqual([], w)
+ self.assertEqual([], e)
+
+ self.assertEqual(b"hello\n", chan.recv(6))
+
+ # and, should be dead again now
+ r, w, e = select.select([chan], [], [], 0.1)
+ self.assertEqual([], r)
+ self.assertEqual([], w)
+ self.assertEqual([], e)
+
+ schan.close()
+
+ # detect eof?
+ for i in range(10):
+ r, w, e = select.select([chan], [], [], 0.1)
+ if chan in r:
+ break
+ time.sleep(0.1)
+ self.assertEqual([chan], r)
+ self.assertEqual([], w)
+ self.assertEqual([], e)
+ self.assertEqual(b"", chan.recv(16))
+
+ # make sure the pipe is still open for now...
+ p = chan._pipe
+ self.assertEqual(False, p._closed)
+ chan.close()
+ # ...and now is closed.
+ self.assertEqual(True, p._closed)
+
+ def test_renegotiate(self):
+ """
+ verify that a transport can correctly renegotiate mid-stream.
+ """
+ self.setup_test_server()
+ self.tc.packetizer.REKEY_BYTES = 16384
+ chan = self.tc.open_session()
+ chan.exec_command("yes")
+ schan = self.ts.accept(1.0)
+
+ self.assertEqual(self.tc.H, self.tc.session_id)
+ for i in range(20):
+ chan.send("x" * 1024)
+ chan.close()
+
+ # allow a few seconds for the rekeying to complete
+ for i in range(50):
+ if self.tc.H != self.tc.session_id:
+ break
+ time.sleep(0.1)
+ self.assertNotEqual(self.tc.H, self.tc.session_id)
+
+ schan.close()
+
+ def test_compression(self):
+ """
+ verify that zlib compression is basically working.
+ """
+
+ def force_compression(o):
+ o.compression = ("zlib",)
+
+ self.setup_test_server(force_compression, force_compression)
+ chan = self.tc.open_session()
+ chan.exec_command("yes")
+ schan = self.ts.accept(1.0)
+
+ bytes = self.tc.packetizer._Packetizer__sent_bytes
+ chan.send("x" * 1024)
+ bytes2 = self.tc.packetizer._Packetizer__sent_bytes
+ block_size = self.tc._cipher_info[self.tc.local_cipher]["block-size"]
+ mac_size = self.tc._mac_info[self.tc.local_mac]["size"]
+ # tests show this is actually compressed to *52 bytes*! including
+ # packet overhead! nice!! :)
+ self.assertTrue(bytes2 - bytes < 1024)
+ self.assertEqual(16 + block_size + mac_size, bytes2 - bytes)
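+        # (Worked example: with a 16-byte cipher block and a 20-byte MAC the
+        # right-hand side is 16 + 16 + 20 = 52, matching the "52 bytes" quoted
+        # in the comment above; other negotiated algorithms give a different
+        # total, which is why the assertion is computed from block_size and
+        # mac_size rather than hardcoded.)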
+
+ chan.close()
+ schan.close()
+
+ def test_x11(self):
+ """
+ verify that an x11 port can be requested and opened.
+ """
+ self.setup_test_server()
+ chan = self.tc.open_session()
+ chan.exec_command("yes")
+ schan = self.ts.accept(1.0)
+
+ requested = []
+
+ def handler(c, addr_port):
+ addr, port = addr_port
+ requested.append((addr, port))
+ self.tc._queue_incoming_channel(c)
+
+ self.assertEqual(
+ None, getattr(self.server, "_x11_screen_number", None)
+ )
+ cookie = chan.request_x11(0, single_connection=True, handler=handler)
+ self.assertEqual(0, self.server._x11_screen_number)
+ self.assertEqual("MIT-MAGIC-COOKIE-1", self.server._x11_auth_protocol)
+ self.assertEqual(cookie, self.server._x11_auth_cookie)
+ self.assertEqual(True, self.server._x11_single_connection)
+
+ x11_server = self.ts.open_x11_channel(("localhost", 6093))
+ x11_client = self.tc.accept()
+ self.assertEqual("localhost", requested[0][0])
+ self.assertEqual(6093, requested[0][1])
+
+ x11_server.send("hello")
+ self.assertEqual(b"hello", x11_client.recv(5))
+
+ x11_server.close()
+ x11_client.close()
+ chan.close()
+ schan.close()
+
+ def test_reverse_port_forwarding(self):
+ """
+ verify that a client can ask the server to open a reverse port for
+ forwarding.
+ """
+ self.setup_test_server()
+ chan = self.tc.open_session()
+ chan.exec_command("yes")
+ self.ts.accept(1.0)
+
+ requested = []
+
+ def handler(c, origin_addr_port, server_addr_port):
+ requested.append(origin_addr_port)
+ requested.append(server_addr_port)
+ self.tc._queue_incoming_channel(c)
+
+ port = self.tc.request_port_forward("127.0.0.1", 0, handler)
+ self.assertEqual(port, self.server._listen.getsockname()[1])
+
+ cs = socket.socket()
+ cs.connect(("127.0.0.1", port))
+ ss, _ = self.server._listen.accept()
+ sch = self.ts.open_forwarded_tcpip_channel(
+ ss.getsockname(), ss.getpeername()
+ )
+ cch = self.tc.accept()
+
+ sch.send("hello")
+ self.assertEqual(b"hello", cch.recv(5))
+ sch.close()
+ cch.close()
+ ss.close()
+ cs.close()
+
+ # now cancel it.
+ self.tc.cancel_port_forward("127.0.0.1", port)
+ self.assertTrue(self.server._listen is None)
+
+ def test_port_forwarding(self):
+ """
+ verify that a client can forward new connections from a locally-
+ forwarded port.
+ """
+ self.setup_test_server()
+ chan = self.tc.open_session()
+ chan.exec_command("yes")
+ self.ts.accept(1.0)
+
+ # open a port on the "server" that the client will ask to forward to.
+ greeting_server = socket.socket()
+ greeting_server.bind(("127.0.0.1", 0))
+ greeting_server.listen(1)
+ greeting_port = greeting_server.getsockname()[1]
+
+ cs = self.tc.open_channel(
+ "direct-tcpip", ("127.0.0.1", greeting_port), ("", 9000)
+ )
+ sch = self.ts.accept(1.0)
+ cch = socket.socket()
+ cch.connect(self.server._tcpip_dest)
+
+ ss, _ = greeting_server.accept()
+ ss.send(b"Hello!\n")
+ ss.close()
+ sch.send(cch.recv(8192))
+ sch.close()
+
+ self.assertEqual(b"Hello!\n", cs.recv(7))
+ cs.close()
+
+ def test_stderr_select(self):
+ """
+ verify that select() on a channel works even if only stderr is
+ receiving data.
+ """
+ self.setup_test_server()
+ chan = self.tc.open_session()
+ chan.invoke_shell()
+ schan = self.ts.accept(1.0)
+
+ # nothing should be ready
+ r, w, e = select.select([chan], [], [], 0.1)
+ self.assertEqual([], r)
+ self.assertEqual([], w)
+ self.assertEqual([], e)
+
+ schan.send_stderr("hello\n")
+
+ # something should be ready now (give it 1 second to appear)
+ for i in range(10):
+ r, w, e = select.select([chan], [], [], 0.1)
+ if chan in r:
+ break
+ time.sleep(0.1)
+ self.assertEqual([chan], r)
+ self.assertEqual([], w)
+ self.assertEqual([], e)
+
+ self.assertEqual(b"hello\n", chan.recv_stderr(6))
+
+ # and, should be dead again now
+ r, w, e = select.select([chan], [], [], 0.1)
+ self.assertEqual([], r)
+ self.assertEqual([], w)
+ self.assertEqual([], e)
+
+ schan.close()
+ chan.close()
+
+ def test_send_ready(self):
+ """
+ verify that send_ready() indicates when a send would not block.
+ """
+ self.setup_test_server()
+ chan = self.tc.open_session()
+ chan.invoke_shell()
+ schan = self.ts.accept(1.0)
+
+ self.assertEqual(chan.send_ready(), True)
+ total = 0
+ K = "*" * 1024
+ limit = 1 + (64 * 2**15)
+ while total < limit:
+ chan.send(K)
+ total += len(K)
+ if not chan.send_ready():
+ break
+ self.assertTrue(total < limit)
+
+ schan.close()
+ chan.close()
+ self.assertEqual(chan.send_ready(), True)
+
+ def test_rekey_deadlock(self):
+ """
+ Regression test for deadlock when in-transit messages are received
+ after MSG_KEXINIT is sent
+
+ Note: When this test fails, it may leak threads.
+ """
+
+ # Test for an obscure deadlocking bug that can occur if we receive
+ # certain messages while initiating a key exchange.
+ #
+ # The deadlock occurs as follows:
+ #
+ # In the main thread:
+ # 1. The user's program calls Channel.send(), which sends
+ # MSG_CHANNEL_DATA to the remote host.
+ # 2. Packetizer discovers that REKEY_BYTES has been exceeded, and
+ # sets the __need_rekey flag.
+ #
+ # In the Transport thread:
+ # 3. Packetizer notices that the __need_rekey flag is set, and raises
+ # NeedRekeyException.
+ # 4. In response to NeedRekeyException, the transport thread sends
+ # MSG_KEXINIT to the remote host.
+ #
+ # On the remote host (using any SSH implementation):
+ # 5. The MSG_CHANNEL_DATA is received, and MSG_CHANNEL_WINDOW_ADJUST
+ # is sent.
+ # 6. The MSG_KEXINIT is received, and a corresponding MSG_KEXINIT is
+ # sent.
+ #
+ # In the main thread:
+ # 7. The user's program calls Channel.send().
+ # 8. Channel.send acquires Channel.lock, then calls
+ # Transport._send_user_message().
+ # 9. Transport._send_user_message waits for Transport.clear_to_send
+ # to be set (i.e., it waits for re-keying to complete).
+ # Channel.lock is still held.
+ #
+ # In the Transport thread:
+ # 10. MSG_CHANNEL_WINDOW_ADJUST is received; Channel._window_adjust
+ # is called to handle it.
+ # 11. Channel._window_adjust tries to acquire Channel.lock, but it
+ # blocks because the lock is already held by the main thread.
+ #
+ # The result is that the Transport thread never processes the remote
+ # host's MSG_KEXINIT packet, because it becomes deadlocked while
+ # handling the preceding MSG_CHANNEL_WINDOW_ADJUST message.
+
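+        # (Aside: stripped of the SSH specifics, steps 8-11 above are a
+        # classic lock-order inversion:
+        #
+        #   main thread:      holds Channel.lock, waits on Transport.clear_to_send
+        #   transport thread: must acquire Channel.lock before it can finish
+        #                     the rekey that would set Transport.clear_to_send
+        #
+        # so neither wait can ever be satisfied.)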
+ # We set up two separate threads for sending and receiving packets,
+ # while the main thread acts as a watchdog timer. If the timer
+ # expires, a deadlock is assumed.
+
+ class SendThread(threading.Thread):
+ def __init__(self, chan, iterations, done_event):
+ threading.Thread.__init__(
+ self, None, None, self.__class__.__name__
+ )
+ self.daemon = True
+ self.chan = chan
+ self.iterations = iterations
+ self.done_event = done_event
+ self.watchdog_event = threading.Event()
+ self.last = None
+
+ def run(self):
+ try:
+ for i in range(1, 1 + self.iterations):
+ if self.done_event.is_set():
+ break
+ self.watchdog_event.set()
+ # print i, "SEND"
+ self.chan.send("x" * 2048)
+ finally:
+ self.done_event.set()
+ self.watchdog_event.set()
+
+ class ReceiveThread(threading.Thread):
+ def __init__(self, chan, done_event):
+ threading.Thread.__init__(
+ self, None, None, self.__class__.__name__
+ )
+ self.daemon = True
+ self.chan = chan
+ self.done_event = done_event
+ self.watchdog_event = threading.Event()
+
+ def run(self):
+ try:
+ while not self.done_event.is_set():
+ if self.chan.recv_ready():
+ chan.recv(65536)
+ self.watchdog_event.set()
+ else:
+ if random.randint(0, 1):
+ time.sleep(random.randint(0, 500) / 1000.0)
+ finally:
+ self.done_event.set()
+ self.watchdog_event.set()
+
+ self.setup_test_server()
+ self.ts.packetizer.REKEY_BYTES = 2048
+
+ chan = self.tc.open_session()
+ chan.exec_command("yes")
+ schan = self.ts.accept(1.0)
+
+ # Monkey patch the client's Transport._handler_table so that the client
+ # sends MSG_CHANNEL_WINDOW_ADJUST whenever it receives an initial
+ # MSG_KEXINIT. This is used to simulate the effect of network latency
+ # on a real MSG_CHANNEL_WINDOW_ADJUST message.
+ self.tc._handler_table = (
+ self.tc._handler_table.copy()
+ ) # copy per-class dictionary
+ _negotiate_keys = self.tc._handler_table[MSG_KEXINIT]
+
+ def _negotiate_keys_wrapper(self, m):
+ if self.local_kex_init is None: # Remote side sent KEXINIT
+ # Simulate in-transit MSG_CHANNEL_WINDOW_ADJUST by sending it
+ # before responding to the incoming MSG_KEXINIT.
+ m2 = Message()
+ m2.add_byte(cMSG_CHANNEL_WINDOW_ADJUST)
+ m2.add_int(chan.remote_chanid)
+ m2.add_int(1) # bytes to add
+ self._send_message(m2)
+ return _negotiate_keys(self, m)
+
+ self.tc._handler_table[MSG_KEXINIT] = _negotiate_keys_wrapper
+
+ # Parameters for the test
+ iterations = 500 # The deadlock does not happen every time, but it
+ # should after many iterations.
+ timeout = 5
+
+ # This event is set when the test is completed
+ done_event = threading.Event()
+
+ # Start the sending thread
+ st = SendThread(schan, iterations, done_event)
+ st.start()
+
+ # Start the receiving thread
+ rt = ReceiveThread(chan, done_event)
+ rt.start()
+
+        # Act as a watchdog timer, checking that both worker threads keep
+        # making progress; if either watchdog event is not set within the
+        # timeout, assume the threads have deadlocked.
+ deadlocked = False
+ while not deadlocked and not done_event.is_set():
+ for event in (st.watchdog_event, rt.watchdog_event):
+ event.wait(timeout)
+ if done_event.is_set():
+ break
+ if not event.is_set():
+ deadlocked = True
+ break
+ event.clear()
+
+ # Tell the threads to stop (if they haven't already stopped). Note
+ # that if one or more threads are deadlocked, they might hang around
+ # forever (until the process exits).
+ done_event.set()
+
+ # Assertion: We must not have detected a timeout.
+ self.assertFalse(deadlocked)
+
+ # Close the channels
+ schan.close()
+ chan.close()
+
+    def test_sanitize_packet_size(self):
+        """
+        verify that packet sizes are clamped to the limits required by the RFC.
+        """
+ for val, correct in [
+ (4095, MIN_PACKET_SIZE),
+ (None, DEFAULT_MAX_PACKET_SIZE),
+ (2**32, MAX_WINDOW_SIZE),
+ ]:
+ self.assertEqual(self.tc._sanitize_packet_size(val), correct)
+
+    def test_sanitize_window_size(self):
+        """
+        verify that window sizes are clamped to the limits required by the RFC.
+        """
+ for val, correct in [
+ (32767, MIN_WINDOW_SIZE),
+ (None, DEFAULT_WINDOW_SIZE),
+ (2**32, MAX_WINDOW_SIZE),
+ ]:
+ self.assertEqual(self.tc._sanitize_window_size(val), correct)
+
+ @slow
+ def test_handshake_timeout(self):
+ """
+ verify that we can get a handshake timeout.
+ """
+ # Tweak client Transport instance's Packetizer instance so
+ # its read_message() sleeps a bit. This helps prevent race conditions
+ # where the client Transport's timeout timer thread doesn't even have
+ # time to get scheduled before the main client thread finishes
+ # handshaking with the server.
+ # (Doing this on the server's transport *sounds* more 'correct' but
+ # actually doesn't work nearly as well for whatever reason.)
+ class SlowPacketizer(Packetizer):
+ def read_message(self):
+ time.sleep(1)
+ return super().read_message()
+
+ # NOTE: prettttty sure since the replaced .packetizer Packetizer is now
+ # no longer doing anything with its copy of the socket...everything'll
+ # be fine. Even tho it's a bit squicky.
+ self.tc.packetizer = SlowPacketizer(self.tc.sock)
+ # Continue with regular test red tape.
+ host_key = RSAKey.from_private_key_file(_support("rsa.key"))
+ public_host_key = RSAKey(data=host_key.asbytes())
+ self.ts.add_server_key(host_key)
+ event = threading.Event()
+ server = NullServer()
+ self.assertTrue(not event.is_set())
+ self.tc.handshake_timeout = 0.000000000001
+ self.ts.start_server(event, server)
+ self.assertRaises(
+ EOFError,
+ self.tc.connect,
+ hostkey=public_host_key,
+ username="slowdive",
+ password="pygmalion",
+ )
+
+ def test_select_after_close(self):
+ """
+ verify that select works when a channel is already closed.
+ """
+ self.setup_test_server()
+ chan = self.tc.open_session()
+ chan.invoke_shell()
+ schan = self.ts.accept(1.0)
+ schan.close()
+
+ # give client a moment to receive close notification
+ time.sleep(0.1)
+
+ r, w, e = select.select([chan], [], [], 0.1)
+ self.assertEqual([chan], r)
+ self.assertEqual([], w)
+ self.assertEqual([], e)
+
+ def test_channel_send_misc(self):
+ """
+        verify behaviour when sending various object types to a channel
+ """
+ self.setup_test_server()
+ text = "\xa7 slice me nicely"
+ with self.tc.open_session() as chan:
+ schan = self.ts.accept(1.0)
+ if schan is None:
+ self.fail("Test server transport failed to accept")
+ sfile = schan.makefile()
+
+ # TypeError raised on non string or buffer type
+ self.assertRaises(TypeError, chan.send, object())
+ self.assertRaises(TypeError, chan.sendall, object())
+
+ # sendall() accepts a unicode instance
+ chan.sendall(text)
+ expected = text.encode("utf-8")
+ self.assertEqual(sfile.read(len(expected)), expected)
+
+ @needs_builtin("buffer")
+ def test_channel_send_buffer(self):
+ """
+ verify sending buffer instances to a channel
+ """
+ self.setup_test_server()
+ data = 3 * b"some test data\n whole"
+ with self.tc.open_session() as chan:
+ schan = self.ts.accept(1.0)
+ if schan is None:
+ self.fail("Test server transport failed to accept")
+ sfile = schan.makefile()
+
+ # send() accepts buffer instances
+ sent = 0
+ while sent < len(data):
+ sent += chan.send(buffer(data, sent, 8)) # noqa
+ self.assertEqual(sfile.read(len(data)), data)
+
+ # sendall() accepts a buffer instance
+ chan.sendall(buffer(data)) # noqa
+ self.assertEqual(sfile.read(len(data)), data)
+
+ @needs_builtin("memoryview")
+ def test_channel_send_memoryview(self):
+ """
+ verify sending memoryview instances to a channel
+ """
+ self.setup_test_server()
+ data = 3 * b"some test data\n whole"
+ with self.tc.open_session() as chan:
+ schan = self.ts.accept(1.0)
+ if schan is None:
+ self.fail("Test server transport failed to accept")
+ sfile = schan.makefile()
+
+ # send() accepts memoryview slices
+ sent = 0
+ view = memoryview(data)
+ while sent < len(view):
+ sent += chan.send(view[sent : sent + 8])
+ self.assertEqual(sfile.read(len(data)), data)
+
+ # sendall() accepts a memoryview instance
+ chan.sendall(memoryview(data))
+ self.assertEqual(sfile.read(len(data)), data)
+
+ def test_server_rejects_open_channel_without_auth(self):
+ try:
+ self.setup_test_server(connect_kwargs={})
+ self.tc.open_session()
+ except ChannelException as e:
+ assert e.code == OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
+ else:
+ assert False, "Did not raise ChannelException!"
+
+ def test_server_rejects_arbitrary_global_request_without_auth(self):
+ self.setup_test_server(connect_kwargs={})
+ # NOTE: this dummy global request kind would normally pass muster
+ # from the test server.
+ self.tc.global_request("acceptable")
+ # Global requests never raise exceptions, even on failure (not sure why
+ # this was the original design...ugh.) Best we can do to tell failure
+ # happened is that the client transport's global_response was set back
+ # to None; if it had succeeded, it would be the response Message.
+ err = "Unauthed global response incorrectly succeeded!"
+ assert self.tc.global_response is None, err
+
+ def test_server_rejects_port_forward_without_auth(self):
+        # NOTE: at protocol level port forward requests are treated the same
+        # as a
+ # regular global request, but Paramiko server implements a special-case
+ # method for it, so it gets its own test. (plus, THAT actually raises
+ # an exception on the client side, unlike the general case...)
+ self.setup_test_server(connect_kwargs={})
+ try:
+ self.tc.request_port_forward("localhost", 1234)
+ except SSHException as e:
+ assert "forwarding request denied" in str(e)
+ else:
+ assert False, "Did not raise SSHException!"
+
+ def _send_unimplemented(self, server_is_sender):
+ self.setup_test_server()
+ sender, recipient = self.tc, self.ts
+ if server_is_sender:
+ sender, recipient = self.ts, self.tc
+ recipient._send_message = Mock()
+ msg = Message()
+ msg.add_byte(cMSG_UNIMPLEMENTED)
+ sender._send_message(msg)
+ # TODO: I hate this but I literally don't see a good way to know when
+ # the recipient has received the sender's message (there are no
+ # existing threading events in play that work for this), esp in this
+ # case where we don't WANT a response (as otherwise we could
+ # potentially try blocking on the sender's receipt of a reply...maybe).
+ time.sleep(0.1)
+ assert not recipient._send_message.called
+
+ def test_server_does_not_respond_to_MSG_UNIMPLEMENTED(self):
+ self._send_unimplemented(server_is_sender=False)
+
+ def test_client_does_not_respond_to_MSG_UNIMPLEMENTED(self):
+ self._send_unimplemented(server_is_sender=True)
+
+ def _send_client_message(self, message_type):
+ self.setup_test_server(connect_kwargs={})
+ self.ts._send_message = Mock()
+ # NOTE: this isn't 100% realistic (most of these message types would
+ # have actual other fields in 'em) but it suffices to test the level of
+ # message dispatch we're interested in here.
+ msg = Message()
+ # TODO: really not liking the whole cMSG_XXX vs MSG_XXX duality right
+ # now, esp since the former is almost always just byte_chr(the
+ # latter)...but since that's the case...
+ msg.add_byte(byte_chr(message_type))
+ self.tc._send_message(msg)
+ # No good way to actually wait for server action (see above tests re:
+ # MSG_UNIMPLEMENTED). Grump.
+ time.sleep(0.1)
+
+ def _expect_unimplemented(self):
+ # Ensure MSG_UNIMPLEMENTED was sent (implies it hit end of loop instead
+ # of truly handling the given message).
+ # NOTE: When bug present, this will actually be the first thing that
+ # fails (since in many cases actual message handling doesn't involve
+ # sending a message back right away).
+ assert self.ts._send_message.call_count == 1
+ reply = self.ts._send_message.call_args[0][0]
+ reply.rewind() # Because it's pre-send, not post-receive
+ assert reply.get_byte() == cMSG_UNIMPLEMENTED
+
+ def test_server_transports_reject_client_message_types(self):
+ # TODO: handle Transport's own tables too, not just its inner auth
+ # handler's table. See TODOs in auth_handler.py
+ some_handler = self._auth_handler_class(self.tc)
+ for message_type in some_handler._client_handler_table:
+ self._send_client_message(message_type)
+ self._expect_unimplemented()
+ # Reset for rest of loop
+ self.tearDown()
+ self.setUp()
+
+ def test_server_rejects_client_MSG_USERAUTH_SUCCESS(self):
+ self._send_client_message(MSG_USERAUTH_SUCCESS)
+ # Sanity checks
+ assert not self.ts.authenticated
+ assert not self.ts.auth_handler.authenticated
+ # Real fix's behavior
+ self._expect_unimplemented()
+
+ def test_can_override_packetizer_used(self):
+ class MyPacketizer(Packetizer):
+ pass
+
+ # control case
+ assert Transport(sock=LoopSocket()).packetizer.__class__ is Packetizer
+ # overridden case
+ tweaked = Transport(sock=LoopSocket(), packetizer_class=MyPacketizer)
+ assert tweaked.packetizer.__class__ is MyPacketizer
+
+
+# TODO: for now this is purely a regression test. It needs actual tests of the
+# intentional new behavior too!
+class ServiceRequestingTransportTest(TransportTest):
+ _auth_handler_class = AuthOnlyHandler
+
+ def setUp(self):
+ # Copypasta (Transport init is load-bearing)
+ self.socks = LoopSocket()
+ self.sockc = LoopSocket()
+ self.sockc.link(self.socks)
+ # New class who dis
+ self.tc = ServiceRequestingTransport(self.sockc)
+ self.ts = ServiceRequestingTransport(self.socks)
+
+
+class AlgorithmDisablingTests(unittest.TestCase):
+ def test_preferred_lists_default_to_private_attribute_contents(self):
+ t = Transport(sock=Mock())
+ assert t.preferred_ciphers == t._preferred_ciphers
+ assert t.preferred_macs == t._preferred_macs
+ assert t.preferred_keys == tuple(
+ t._preferred_keys
+ + tuple(
+ "{}-cert-v01@openssh.com".format(x) for x in t._preferred_keys
+ )
+ )
+ assert t.preferred_kex == t._preferred_kex
+
+ def test_preferred_lists_filter_disabled_algorithms(self):
+ t = Transport(
+ sock=Mock(),
+ disabled_algorithms={
+ "ciphers": ["aes128-cbc"],
+ "macs": ["hmac-md5"],
+ "keys": ["ssh-dss"],
+ "kex": ["diffie-hellman-group14-sha256"],
+ },
+ )
+ assert "aes128-cbc" in t._preferred_ciphers
+ assert "aes128-cbc" not in t.preferred_ciphers
+ assert "hmac-md5" in t._preferred_macs
+ assert "hmac-md5" not in t.preferred_macs
+ assert "ssh-dss" in t._preferred_keys
+ assert "ssh-dss" not in t.preferred_keys
+ assert "ssh-dss-cert-v01@openssh.com" not in t.preferred_keys
+ assert "diffie-hellman-group14-sha256" in t._preferred_kex
+ assert "diffie-hellman-group14-sha256" not in t.preferred_kex
+
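+    # Illustrative sketch (assuming ``import paramiko``; hostname and username
+    # below are placeholders): the same ``disabled_algorithms`` mapping is
+    # also accepted by the high-level client, which forwards it to Transport:
+    #
+    #   client = paramiko.SSHClient()
+    #   client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    #   client.connect(
+    #       "ssh.example.com",
+    #       username="user",
+    #       disabled_algorithms={"kex": ["diffie-hellman-group14-sha256"]},
+    #   )
+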
+ def test_implementation_refers_to_public_algo_lists(self):
+ t = Transport(
+ sock=Mock(),
+ disabled_algorithms={
+ "ciphers": ["aes128-cbc"],
+ "macs": ["hmac-md5"],
+ "keys": ["ssh-dss"],
+ "kex": ["diffie-hellman-group14-sha256"],
+ "compression": ["zlib"],
+ },
+ )
+ # Enable compression cuz otherwise disabling one option for it makes no
+ # sense...
+ t.use_compression(True)
+ # Effectively a random spot check, but kex init touches most/all of the
+ # algorithm lists so it's a good spot.
+ t._send_message = Mock()
+ t._send_kex_init()
+ # Cribbed from Transport._parse_kex_init, which didn't feel worth
+ # refactoring given all the vars involved :(
+ m = t._send_message.call_args[0][0]
+ m.rewind()
+ m.get_byte() # the msg type
+ m.get_bytes(16) # cookie, discarded
+ kexen = m.get_list()
+ server_keys = m.get_list()
+ ciphers = m.get_list()
+ m.get_list()
+ macs = m.get_list()
+ m.get_list()
+ compressions = m.get_list()
+ # OK, now we can actually check that our disabled algos were not
+ # included (as this message includes the full lists)
+ assert "aes128-cbc" not in ciphers
+ assert "hmac-md5" not in macs
+ assert "ssh-dss" not in server_keys
+ assert "diffie-hellman-group14-sha256" not in kexen
+ assert "zlib" not in compressions
+
+
+class TestSHA2SignatureKeyExchange(unittest.TestCase):
+ # NOTE: these all rely on the default server() hostkey being RSA
+ # NOTE: these rely on both sides being properly implemented re: agreed-upon
+    # hostkey during kex being what's actually used. Truly proving that e.g.
+    # SHA512 was used is quite difficult without super gross hacks. However, there
+ # are new tests in test_pkey.py which use known signature blobs to prove
+ # the SHA2 family was in fact used!
+
+ @requires_sha1_signing
+ def test_base_case_ssh_rsa_still_used_as_fallback(self):
+ # Prove that ssh-rsa is used if either, or both, participants have SHA2
+ # algorithms disabled
+ for which in ("init", "client_init", "server_init"):
+ with server(**{which: _disable_sha2}) as (tc, _):
+ assert tc.host_key_type == "ssh-rsa"
+
+ def test_kex_with_sha2_512(self):
+ # It's the default!
+ with server() as (tc, _):
+ assert tc.host_key_type == "rsa-sha2-512"
+
+ def test_kex_with_sha2_256(self):
+ # No 512 -> you get 256
+ with server(
+ init=dict(disabled_algorithms=dict(keys=["rsa-sha2-512"]))
+ ) as (tc, _):
+ assert tc.host_key_type == "rsa-sha2-256"
+
+ def _incompatible_peers(self, client_init, server_init):
+ with server(
+ client_init=client_init, server_init=server_init, catch_error=True
+ ) as (tc, ts, err):
+ # If neither side blew up then that's bad!
+ assert err is not None
+ # If client side blew up first, it'll be straightforward
+ if isinstance(err, IncompatiblePeer):
+ pass
+ # If server side blew up first, client sees EOF & we need to check
+ # the server transport for its saved error (otherwise it can only
+ # appear in log output)
+ elif isinstance(err, EOFError):
+ assert ts.saved_exception is not None
+ assert isinstance(ts.saved_exception, IncompatiblePeer)
+ # If it was something else, welp
+ else:
+ raise err
+
+ def test_client_sha2_disabled_server_sha1_disabled_no_match(self):
+ self._incompatible_peers(
+ client_init=_disable_sha2, server_init=_disable_sha1
+ )
+
+ def test_client_sha1_disabled_server_sha2_disabled_no_match(self):
+ self._incompatible_peers(
+ client_init=_disable_sha1, server_init=_disable_sha2
+ )
+
+ def test_explicit_client_hostkey_not_limited(self):
+ # Be very explicit about the hostkey on BOTH ends,
+ # and ensure it still ends up choosing sha2-512.
+ # (This is a regression test vs previous implementation which overwrote
+ # the entire preferred-hostkeys structure when given an explicit key as
+ # a client.)
+ hostkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ connect = dict(
+ hostkey=hostkey, username="slowdive", password="pygmalion"
+ )
+ with server(hostkey=hostkey, connect=connect) as (tc, _):
+ assert tc.host_key_type == "rsa-sha2-512"
+
+
+class TestExtInfo(unittest.TestCase):
+ def test_ext_info_handshake_exposed_in_client_kexinit(self):
+ with server() as (tc, _):
+ # NOTE: this is latest KEXINIT /sent by us/ (Transport retains it)
+ kex = tc._get_latest_kex_init()
+ # flag in KexAlgorithms list
+ assert "ext-info-c" in kex["kex_algo_list"]
+ # data stored on Transport after hearing back from a compatible
+ # server (such as ourselves in server mode)
+ assert tc.server_extensions == {
+ "server-sig-algs": b"ssh-ed25519,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,rsa-sha2-512,rsa-sha2-256,ssh-rsa,ssh-dss" # noqa
+ }
+
+ def test_client_uses_server_sig_algs_for_pubkey_auth(self):
+ privkey = RSAKey.from_private_key_file(_support("rsa.key"))
+ with server(
+ pubkeys=[privkey],
+ connect=dict(pkey=privkey),
+ server_init=dict(
+ disabled_algorithms=dict(pubkeys=["rsa-sha2-512"])
+ ),
+ ) as (tc, _):
+ assert tc.is_authenticated()
+ # Client settled on 256 despite itself not having 512 disabled (and
+ # otherwise, 512 would have been earlier in the preferred list)
+ assert tc._agreed_pubkey_algorithm == "rsa-sha2-256"
+
+
+class BadSeqPacketizer(Packetizer):
+ def read_message(self):
+ cmd, msg = super().read_message()
+ # Only mess w/ seqno if kexinit.
+ if cmd is MSG_KEXINIT:
+ # NOTE: this is /only/ the copy of the seqno which gets
+ # transmitted up from Packetizer; it's not modifying
+ # Packetizer's own internal seqno. For these tests,
+ # modifying the latter isn't required, and is also harder
+ # to do w/o triggering MAC mismatches.
+ msg.seqno = 17 # arbitrary nonzero int
+ return cmd, msg
+
+
+class TestStrictKex:
+ def test_kex_algos_includes_kex_strict_c(self):
+ with server() as (tc, _):
+ kex = tc._get_latest_kex_init()
+ assert "kex-strict-c-v00@openssh.com" in kex["kex_algo_list"]
+
+ @mark.parametrize(
+ "server_active,client_active",
+ itertools.product([True, False], repeat=2),
+ )
+ def test_mode_agreement(self, server_active, client_active):
+ with server(
+ server_init=dict(strict_kex=server_active),
+ client_init=dict(strict_kex=client_active),
+ ) as (tc, ts):
+ if server_active and client_active:
+ assert tc.agreed_on_strict_kex is True
+ assert ts.agreed_on_strict_kex is True
+ else:
+ assert tc.agreed_on_strict_kex is False
+ assert ts.agreed_on_strict_kex is False
+
+ def test_mode_advertised_by_default(self):
+ # NOTE: no explicit strict_kex overrides...
+ with server() as (tc, ts):
+ assert all(
+ (
+ tc.advertise_strict_kex,
+ tc.agreed_on_strict_kex,
+ ts.advertise_strict_kex,
+ ts.agreed_on_strict_kex,
+ )
+ )
+
+ @mark.parametrize(
+ "ptype",
+ (
+ # "normal" but definitely out-of-order message
+ MSG_CHANNEL_OPEN,
+ # Normally ignored, but not in this case
+ MSG_IGNORE,
+ # Normally triggers debug parsing, but not in this case
+ MSG_DEBUG,
+ # Normally ignored, but...you get the idea
+ MSG_UNIMPLEMENTED,
+ # Not real, so would normally trigger us /sending/
+ # MSG_UNIMPLEMENTED, but...
+ MSG_FUGGEDABOUTIT,
+ ),
+ )
+ def test_MessageOrderError_non_kex_messages_in_initial_kex(self, ptype):
+ class AttackTransport(Transport):
+ # Easiest apparent spot on server side which is:
+ # - late enough for both ends to have handshook on strict mode
+ # - early enough to be in the window of opportunity for Terrapin
+ # attack; essentially during actual kex, when the engine is
+            # waiting for things like MSG_KEXECDH_REPLY (e.g. for curve25519).
+ def _negotiate_keys(self, m):
+ self.clear_to_send_lock.acquire()
+ try:
+ self.clear_to_send.clear()
+ finally:
+ self.clear_to_send_lock.release()
+ if self.local_kex_init is None:
+ # remote side wants to renegotiate
+ self._send_kex_init()
+ self._parse_kex_init(m)
+ # Here, we would normally kick over to kex_engine, but instead
+ # we want the server to send the OOO message.
+ m = Message()
+ m.add_byte(byte_chr(ptype))
+ # rest of packet unnecessary...
+ self._send_message(m)
+
+ with raises(MessageOrderError):
+ with server(server_transport_factory=AttackTransport) as (tc, _):
+ pass # above should run and except during connect()
+
+ def test_SSHException_raised_on_out_of_order_messages_when_not_strict(
+ self,
+ ):
+ # This is kind of dumb (either situation is still fatal!) but whatever,
+ # may as well be strict with our new strict flag...
+ with raises(SSHException) as info: # would be true either way, but
+ with server(
+ client_init=dict(strict_kex=False),
+ ) as (tc, _):
+ tc._expect_packet(MSG_KEXINIT)
+ tc.open_session()
+ assert info.type is SSHException # NOT MessageOrderError!
+
+ def test_error_not_raised_when_kexinit_not_seq_0_but_unstrict(self):
+ with server(
+ client_init=dict(
+ # Disable strict kex
+ strict_kex=False,
+ # Give our clientside a packetizer that sets all kexinit
+ # Message objects to have .seqno==17, which would trigger the
+ # new logic if we'd forgotten to wrap it in strict-kex check
+ packetizer_class=BadSeqPacketizer,
+ ),
+ ):
+ pass # kexinit happens at connect...
+
+ def test_MessageOrderError_raised_when_kexinit_not_seq_0_and_strict(self):
+ with raises(MessageOrderError):
+ with server(
+ # Give our clientside a packetizer that sets all kexinit
+ # Message objects to have .seqno==17, which should trigger the
+ # new logic (given we are NOT disabling strict-mode)
+ client_init=dict(packetizer_class=BadSeqPacketizer),
+ ):
+ pass # kexinit happens at connect...
+
+ def test_sequence_numbers_reset_on_newkeys_when_strict(self):
+ with server(defer=True) as (tc, ts):
+ # When in strict mode, these should all be zero or close to it
+ # (post-kexinit, pre-auth).
+ # Server->client will be 1 (EXT_INFO got sent after NEWKEYS)
+ assert tc.packetizer._Packetizer__sequence_number_in == 1
+ assert ts.packetizer._Packetizer__sequence_number_out == 1
+ # Client->server will be 0
+ assert tc.packetizer._Packetizer__sequence_number_out == 0
+ assert ts.packetizer._Packetizer__sequence_number_in == 0
+
+ def test_sequence_numbers_not_reset_on_newkeys_when_not_strict(self):
+ with server(defer=True, client_init=dict(strict_kex=False)) as (
+ tc,
+ ts,
+ ):
+ # When not in strict mode, these will all be ~3-4 or so
+ # (post-kexinit, pre-auth). Not encoding exact values as it will
+ # change anytime we mess with the test harness...
+ assert tc.packetizer._Packetizer__sequence_number_in != 0
+ assert tc.packetizer._Packetizer__sequence_number_out != 0
+ assert ts.packetizer._Packetizer__sequence_number_in != 0
+ assert ts.packetizer._Packetizer__sequence_number_out != 0
+
+ def test_sequence_number_rollover_detected(self):
+ class RolloverTransport(Transport):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ # Induce an about-to-rollover seqno, such that it rolls over
+ # during initial kex.
+ setattr(
+ self.packetizer,
+ "_Packetizer__sequence_number_in",
+ sys.maxsize,
+ )
+ setattr(
+ self.packetizer,
+ "_Packetizer__sequence_number_out",
+ sys.maxsize,
+ )
+
+ with raises(
+ SSHException,
+ match=r"Sequence number rolled over during initial kex!",
+ ):
+ with server(
+ client_init=dict(
+ # Disable strict kex - this should happen always
+ strict_kex=False,
+ ),
+ # Transport which tickles its packetizer seqno's
+ transport_factory=RolloverTransport,
+ ):
+ pass # kexinit happens at connect...
diff --git a/tests/test_util.py b/tests/test_util.py
new file mode 100644
index 0000000..a2a8224
--- /dev/null
+++ b/tests/test_util.py
@@ -0,0 +1,136 @@
+# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Some unit tests for utility functions.
+"""
+
+from binascii import hexlify
+import os
+from hashlib import sha1
+import unittest
+
+import paramiko
+import paramiko.util
+from paramiko.util import safe_string
+
+
+test_hosts_file = """\
+secure.example.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA1PD6U2/TVxET6lkpKhOk5r\
+9q/kAYG6sP9f5zuUYP8i7FOFp/6ncCEbbtg/lB+A3iidyxoSWl+9jtoyyDOOVX4UIDV9G11Ml8om3\
+D+jrpI9cycZHqilK0HmxDeCuxbwyMuaCygU9gS2qoRvNLWZk70OpIKSSpBo0Wl3/XUmz9uhc=
+happy.example.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA8bP1ZA7DCZDB9J0s50l31M\
+BGQ3GQ/Fc7SX6gkpXkwcZryoi4kNFhHu5LvHcZPdxXV1D+uTMfGS1eyd2Yz/DoNWXNAl8TI0cAsW\
+5ymME3bQ4J/k1IKxCtz/bAlAqFgKoc+EolMziDYqWIATtW0rYTJvzGAzTmMj80/QpsFH+Pc2M=
+"""
+
+
+class UtilTest(unittest.TestCase):
+ def test_imports(self):
+ """
+ verify that all the classes can be imported from paramiko.
+ """
+ for name in (
+ "Agent",
+ "AgentKey",
+ "AuthenticationException",
+ "AuthFailure",
+ "AuthHandler",
+ "AuthResult",
+ "AuthSource",
+ "AuthStrategy",
+ "AutoAddPolicy",
+ "BadAuthenticationType",
+ "BufferedFile",
+ "Channel",
+ "ChannelException",
+ "ConfigParseError",
+ "CouldNotCanonicalize",
+ "DSSKey",
+ "HostKeys",
+ "InMemoryPrivateKey",
+ "Message",
+ "MissingHostKeyPolicy",
+ "NoneAuth",
+ "OnDiskPrivateKey",
+ "Password",
+ "PasswordRequiredException",
+ "PrivateKey",
+ "RSAKey",
+ "RejectPolicy",
+ "SFTP",
+ "SFTPAttributes",
+ "SFTPClient",
+ "SFTPError",
+ "SFTPFile",
+ "SFTPHandle",
+ "SFTPServer",
+ "SFTPServerInterface",
+ "SSHClient",
+ "SSHConfig",
+ "SSHConfigDict",
+ "SSHException",
+ "SecurityOptions",
+ "ServerInterface",
+ "SourceResult",
+ "SubsystemHandler",
+ "Transport",
+ "WarningPolicy",
+ "util",
+ ):
+ assert name in dir(paramiko)
+
+ def test_generate_key_bytes(self):
+ key_bytes = paramiko.util.generate_key_bytes(
+ sha1, b"ABCDEFGH", "This is my secret passphrase.", 64
+ )
+ hexy = "".join([f"{byte:02x}" for byte in key_bytes])
+ hexpected = "9110e2f6793b69363e58173e9436b13a5a4b339005741d5c680e505f57d871347b4239f14fb5c46e857d5e100424873ba849ac699cea98d729e57b3e84378e8b" # noqa
+ assert hexy == hexpected
+
+ def test_host_keys(self):
+ with open("hostfile.temp", "w") as f:
+ f.write(test_hosts_file)
+ try:
+ hostdict = paramiko.util.load_host_keys("hostfile.temp")
+ assert 2 == len(hostdict)
+ assert 1 == len(list(hostdict.values())[0])
+ assert 1 == len(list(hostdict.values())[1])
+ fp = hexlify(
+ hostdict["secure.example.com"]["ssh-rsa"].get_fingerprint()
+ ).upper()
+ assert b"E6684DB30E109B67B70FF1DC5C7F1363" == fp
+ finally:
+ os.unlink("hostfile.temp")
+
+ def test_clamp_value(self):
+ assert 32768 == paramiko.util.clamp_value(32767, 32768, 32769)
+ assert 32767 == paramiko.util.clamp_value(32767, 32765, 32769)
+ assert 32769 == paramiko.util.clamp_value(32767, 32770, 32769)
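+        # (Aside: the argument order is clamp_value(minimum, val, maximum),
+        # with the middle argument being the value to clamp, so the three
+        # cases above exercise in-range, below-minimum and above-maximum
+        # inputs respectively.)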
+
+ def test_safe_string(self):
+ vanilla = b"vanilla"
+ has_bytes = b"has \7\3 bytes"
+ safe_vanilla = safe_string(vanilla)
+ safe_has_bytes = safe_string(has_bytes)
+ expected_bytes = b"has %07%03 bytes"
+ err = "{!r} != {!r}"
+ msg = err.format(safe_vanilla, vanilla)
+ assert safe_vanilla == vanilla, msg
+ msg = err.format(safe_has_bytes, expected_bytes)
+ assert safe_has_bytes == expected_bytes, msg
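+
+        # Illustrative aside (interactive, not part of the suite):
+        # safe_string() leaves printable ASCII untouched and renders every
+        # other byte as a %XX escape, e.g.
+        #
+        #   >>> from paramiko.util import safe_string
+        #   >>> safe_string(b"ok\x07")
+        #   b'ok%07'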