summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2021-10-09 06:19:26 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2021-10-09 06:19:26 +0000
commitaa33ca9a0cd102e3a696623db778a05905ef4ed0 (patch)
treef8af6bc4efe69c3137603c53816d794b52965e03
parentInitial commit. (diff)
downloadpsycopg2-aa33ca9a0cd102e3a696623db778a05905ef4ed0.tar.xz
psycopg2-aa33ca9a0cd102e3a696623db778a05905ef4ed0.zip
Adding upstream version 2.9.1.upstream/2.9.1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--AUTHORS15
-rw-r--r--INSTALL4
-rw-r--r--LICENSE49
-rw-r--r--MANIFEST.in9
-rw-r--r--Makefile104
-rw-r--r--NEWS1432
-rw-r--r--PKG-INFO107
-rw-r--r--README.rst73
-rw-r--r--doc/COPYING.LESSER165
-rw-r--r--doc/Makefile39
-rw-r--r--doc/README.rst20
-rw-r--r--doc/SUCCESS114
-rw-r--r--doc/pep-0249.txt1005
-rw-r--r--doc/requirements.txt8
-rw-r--r--doc/src/Makefile99
-rw-r--r--doc/src/_static/psycopg.css136
-rw-r--r--doc/src/advanced.rst599
-rw-r--r--doc/src/conf.py288
-rw-r--r--doc/src/connection.rst916
-rw-r--r--doc/src/cursor.rst678
-rw-r--r--doc/src/errorcodes.rst76
-rw-r--r--doc/src/errors.rst83
-rw-r--r--doc/src/extensions.rst1010
-rw-r--r--doc/src/extras.rst1085
-rw-r--r--doc/src/faq.rst382
-rw-r--r--doc/src/index.rst70
-rw-r--r--doc/src/install.rst357
-rw-r--r--doc/src/license.rst7
-rw-r--r--doc/src/module.rst388
-rw-r--r--doc/src/news.rst8
-rw-r--r--doc/src/pool.rst60
-rw-r--r--doc/src/sql.rst147
-rwxr-xr-xdoc/src/tools/lib/dbapi_extension.py50
-rw-r--r--doc/src/tools/lib/sql_role.py19
-rw-r--r--doc/src/tools/lib/ticket_role.py57
-rw-r--r--doc/src/tools/make_sqlstate_docs.py57
-rw-r--r--doc/src/tz.rst19
-rw-r--r--doc/src/usage.rst1106
-rw-r--r--lib/__init__.py126
-rw-r--r--lib/_ipaddress.py90
-rw-r--r--lib/_json.py199
-rw-r--r--lib/_range.py537
-rw-r--r--lib/errorcodes.py447
-rw-r--r--lib/errors.py38
-rw-r--r--lib/extensions.py213
-rw-r--r--lib/extras.py1306
-rw-r--r--lib/pool.py187
-rw-r--r--lib/sql.py455
-rw-r--r--lib/tz.py158
-rw-r--r--psycopg/_psycopg.vc9.amd64.manifest15
-rw-r--r--psycopg/_psycopg.vc9.x86.manifest15
-rw-r--r--psycopg/adapter_asis.c195
-rw-r--r--psycopg/adapter_asis.h48
-rw-r--r--psycopg/adapter_binary.c281
-rw-r--r--psycopg/adapter_binary.h48
-rw-r--r--psycopg/adapter_datetime.c515
-rw-r--r--psycopg/adapter_datetime.h107
-rw-r--r--psycopg/adapter_list.c342
-rw-r--r--psycopg/adapter_list.h47
-rw-r--r--psycopg/adapter_pboolean.c185
-rw-r--r--psycopg/adapter_pboolean.h48
-rw-r--r--psycopg/adapter_pdecimal.c248
-rw-r--r--psycopg/adapter_pdecimal.h48
-rw-r--r--psycopg/adapter_pfloat.c221
-rw-r--r--psycopg/adapter_pfloat.h48
-rw-r--r--psycopg/adapter_pint.c222
-rw-r--r--psycopg/adapter_pint.h48
-rw-r--r--psycopg/adapter_qstring.c307
-rw-r--r--psycopg/adapter_qstring.h52
-rw-r--r--psycopg/aix_support.c58
-rw-r--r--psycopg/aix_support.h48
-rw-r--r--psycopg/bytes_format.c309
-rw-r--r--psycopg/column.h49
-rw-r--r--psycopg/column_type.c420
-rw-r--r--psycopg/config.h216
-rw-r--r--psycopg/connection.h229
-rw-r--r--psycopg/connection_int.c1553
-rw-r--r--psycopg/connection_type.c1517
-rw-r--r--psycopg/conninfo.h41
-rw-r--r--psycopg/conninfo_type.c648
-rw-r--r--psycopg/cursor.h147
-rw-r--r--psycopg/cursor_int.c171
-rw-r--r--psycopg/cursor_type.c2126
-rw-r--r--psycopg/diagnostics.h41
-rw-r--r--psycopg/diagnostics_type.c208
-rw-r--r--psycopg/error.h46
-rw-r--r--psycopg/error_type.c376
-rw-r--r--psycopg/green.c210
-rw-r--r--psycopg/green.h76
-rw-r--r--psycopg/libpq_support.c105
-rw-r--r--psycopg/libpq_support.h49
-rw-r--r--psycopg/lobject.h102
-rw-r--r--psycopg/lobject_int.c486
-rw-r--r--psycopg/lobject_type.c471
-rw-r--r--psycopg/microprotocols.c277
-rw-r--r--psycopg/microprotocols.h64
-rw-r--r--psycopg/microprotocols_proto.c180
-rw-r--r--psycopg/microprotocols_proto.h47
-rw-r--r--psycopg/notify.h41
-rw-r--r--psycopg/notify_type.c298
-rw-r--r--psycopg/pgtypes.h65
-rw-r--r--psycopg/pqpath.c1834
-rw-r--r--psycopg/pqpath.h74
-rw-r--r--psycopg/psycopg.h107
-rw-r--r--psycopg/psycopgmodule.c1030
-rw-r--r--psycopg/python.h99
-rw-r--r--psycopg/replication_connection.h53
-rw-r--r--psycopg/replication_connection_type.c193
-rw-r--r--psycopg/replication_cursor.h66
-rw-r--r--psycopg/replication_cursor_type.c394
-rw-r--r--psycopg/replication_message.h58
-rw-r--r--psycopg/replication_message_type.c195
-rw-r--r--psycopg/solaris_support.c58
-rw-r--r--psycopg/solaris_support.h48
-rw-r--r--psycopg/sqlstate_errors.h335
-rw-r--r--psycopg/typecast.c620
-rw-r--r--psycopg/typecast.h91
-rw-r--r--psycopg/typecast_array.c298
-rw-r--r--psycopg/typecast_basic.c150
-rw-r--r--psycopg/typecast_binary.c275
-rw-r--r--psycopg/typecast_binary.h50
-rw-r--r--psycopg/typecast_builtins.c71
-rw-r--r--psycopg/typecast_datetime.c486
-rw-r--r--psycopg/utils.c456
-rw-r--r--psycopg/utils.h65
-rw-r--r--psycopg/win32_support.c90
-rw-r--r--psycopg/win32_support.h56
-rw-r--r--psycopg/xid.h52
-rw-r--r--psycopg/xid_type.c665
-rw-r--r--psycopg2.egg-info/PKG-INFO107
-rw-r--r--psycopg2.egg-info/SOURCES.txt176
-rw-r--r--psycopg2.egg-info/dependency_links.txt1
-rw-r--r--psycopg2.egg-info/top_level.txt1
-rwxr-xr-xscripts/build/appveyor.py848
-rwxr-xr-xscripts/build/build_libpq.sh130
-rwxr-xr-xscripts/build/build_macos.sh79
-rwxr-xr-xscripts/build/build_manylinux2014.sh75
-rwxr-xr-xscripts/build/build_manylinux_2_24.sh75
-rwxr-xr-xscripts/build/build_sdist.sh26
-rwxr-xr-xscripts/build/download_packages_appveyor.py103
-rwxr-xr-xscripts/build/download_packages_github.py99
-rwxr-xr-xscripts/make_errorcodes.py152
-rwxr-xr-xscripts/make_errors.py137
-rwxr-xr-xscripts/refcounter.py107
-rwxr-xr-xscripts/travis_update_docs.sh23
-rw-r--r--setup.cfg14
-rw-r--r--setup.py572
-rwxr-xr-xtests/__init__.py104
-rw-r--r--tests/dbapi20.py862
-rw-r--r--tests/dbapi20_tpc.py144
-rwxr-xr-xtests/test_async.py546
-rwxr-xr-xtests/test_bugX000.py48
-rwxr-xr-xtests/test_bug_gc.py52
-rwxr-xr-xtests/test_cancel.py117
-rwxr-xr-xtests/test_connection.py1944
-rwxr-xr-xtests/test_copy.py404
-rwxr-xr-xtests/test_cursor.py701
-rwxr-xr-xtests/test_dates.py555
-rwxr-xr-xtests/test_errcodes.py74
-rwxr-xr-xtests/test_errors.py94
-rwxr-xr-xtests/test_extras_dictcursor.py646
-rwxr-xr-xtests/test_fast_executemany.py269
-rwxr-xr-xtests/test_green.py246
-rwxr-xr-xtests/test_ipaddress.py120
-rwxr-xr-xtests/test_lobject.py530
-rwxr-xr-xtests/test_module.py367
-rwxr-xr-xtests/test_notify.py231
-rwxr-xr-xtests/test_psycopg2_dbapi20.py86
-rwxr-xr-xtests/test_quote.py229
-rwxr-xr-xtests/test_replication.py276
-rwxr-xr-xtests/test_sql.py412
-rwxr-xr-xtests/test_transaction.py258
-rwxr-xr-xtests/test_types_basic.py492
-rwxr-xr-xtests/test_types_extras.py1597
-rwxr-xr-xtests/test_with.py319
-rw-r--r--tests/testconfig.py42
-rw-r--r--tests/testutils.py544
177 files changed, 51314 insertions, 0 deletions
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..60677dd
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,15 @@
+Main authors:
+ Federico Di Gregorio <fog@debian.org>
+ Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+For the win32 port:
+ Jason Erickson <jerickso@indian.com>
+
+Additional Help:
+
+ Peter Fein contributed a logging connection/cursor class that even if it
+ was not used directly heavily influenced the implementation currently in
+ psycopg2.extras.
+
+ Jan Urbański (re)started the work on asynchronous queries and contributed
+ both on that and on other parts of psycopg2.
diff --git a/INSTALL b/INSTALL
new file mode 100644
index 0000000..7a04aab
--- /dev/null
+++ b/INSTALL
@@ -0,0 +1,4 @@
+Installation instructions are included in the docs.
+
+Please check the 'doc/src/install.rst' file or online at
+<https://www.psycopg.org/docs/install.html>.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..9029e70
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,49 @@
+psycopg2 and the LGPL
+---------------------
+
+psycopg2 is free software: you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+In addition, as a special exception, the copyright holders give
+permission to link this program with the OpenSSL library (or with
+modified versions of OpenSSL that use the same license as OpenSSL),
+and distribute linked combinations including the two.
+
+You must obey the GNU Lesser General Public License in all respects for
+all of the code used other than OpenSSL. If you modify file(s) with this
+exception, you may extend this exception to your version of the file(s),
+but you are not obligated to do so. If you do not wish to do so, delete
+this exception statement from your version. If you delete this exception
+statement from all source files in the program, then also delete it here.
+
+You should have received a copy of the GNU Lesser General Public License
+along with psycopg2 (see the doc/ directory.)
+If not, see <https://www.gnu.org/licenses/>.
+
+
+Alternative licenses
+--------------------
+
+The following BSD-like license applies (at your option) to the files following
+the pattern ``psycopg/adapter*.{h,c}`` and ``psycopg/microprotocol*.{h,c}``:
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product documentation
+ would be appreciated but is not required.
+
+ 2. Altered source versions must be plainly marked as such, and must not
+ be misrepresented as being the original software.
+
+ 3. This notice may not be removed or altered from any source distribution.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..3fcce43
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,9 @@
+recursive-include psycopg *.c *.h *.manifest
+recursive-include lib *.py
+recursive-include tests *.py
+include doc/README.rst doc/SUCCESS doc/COPYING.LESSER doc/pep-0249.txt
+include doc/Makefile doc/requirements.txt
+recursive-include doc/src *.rst *.py *.css Makefile
+recursive-include scripts *.py *.sh
+include AUTHORS README.rst INSTALL LICENSE NEWS
+include MANIFEST.in setup.py setup.cfg Makefile
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..9cbb16f
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,104 @@
+# Makefile for psycopg2. Do you want to...
+#
+# Build the library::
+#
+# make
+#
+# Build the documentation::
+#
+# make env (once)
+# make docs
+#
+# Create a source package::
+#
+# make sdist
+#
+# Run the test::
+#
+# make check # this requires setting up a test database with the correct user
+
+PYTHON := python$(PYTHON_VERSION)
+PYTHON_VERSION ?= $(shell $(PYTHON) -c 'import sys; print ("%d.%d" % sys.version_info[:2])')
+BUILD_DIR = $(shell pwd)/build/lib.$(PYTHON_VERSION)
+
+SOURCE_C := $(wildcard psycopg/*.c psycopg/*.h)
+SOURCE_PY := $(wildcard lib/*.py)
+SOURCE_TESTS := $(wildcard tests/*.py)
+SOURCE_DOC := $(wildcard doc/src/*.rst)
+SOURCE := $(SOURCE_C) $(SOURCE_PY) $(SOURCE_TESTS) $(SOURCE_DOC)
+
+PACKAGE := $(BUILD_DIR)/psycopg2
+PLATLIB := $(PACKAGE)/_psycopg.so
+PURELIB := $(patsubst lib/%,$(PACKAGE)/%,$(SOURCE_PY))
+
+BUILD_OPT := --build-lib=$(BUILD_DIR)
+BUILD_EXT_OPT := --build-lib=$(BUILD_DIR)
+SDIST_OPT := --formats=gztar
+
+ifdef PG_CONFIG
+ BUILD_EXT_OPT += --pg-config=$(PG_CONFIG)
+endif
+
+VERSION := $(shell grep PSYCOPG_VERSION setup.py | head -1 | sed -e "s/.*'\(.*\)'/\1/")
+SDIST := dist/psycopg2-$(VERSION).tar.gz
+
+.PHONY: check clean
+
+default: package
+
+all: package sdist
+
+package: $(PLATLIB) $(PURELIB)
+
+docs: docs-html
+
+docs-html: doc/html/genindex.html
+
+# for PyPI documentation
+docs-zip: doc/docs.zip
+
+sdist: $(SDIST)
+
+env:
+ $(MAKE) -C doc $@
+
+check:
+ PYTHONPATH=$(BUILD_DIR) $(PYTHON) -c "import tests; tests.unittest.main(defaultTest='tests.test_suite')" --verbose
+
+testdb:
+ @echo "* Creating $(TESTDB)"
+ @if psql -l | grep -q " $(TESTDB) "; then \
+ dropdb $(TESTDB) >/dev/null; \
+ fi
+ createdb $(TESTDB)
+ # Note to packagers: this requires the postgres user running the test
+ # to be a superuser. You may change this line to use the superuser only
+ # to install the contrib. Feel free to suggest a better way to set up the
+ # testing environment (as the current is enough for development).
+ psql -f `pg_config --sharedir`/contrib/hstore.sql $(TESTDB)
+
+
+$(PLATLIB): $(SOURCE_C)
+ $(PYTHON) setup.py build_ext $(BUILD_EXT_OPT)
+
+$(PACKAGE)/%.py: lib/%.py
+ $(PYTHON) setup.py build_py $(BUILD_OPT)
+ touch $@
+
+$(PACKAGE)/tests/%.py: tests/%.py
+ $(PYTHON) setup.py build_py $(BUILD_OPT)
+ touch $@
+
+$(SDIST): $(SOURCE)
+ $(PYTHON) setup.py sdist $(SDIST_OPT)
+
+# docs depend on the build as it partly use introspection.
+doc/html/genindex.html: $(PLATLIB) $(PURELIB) $(SOURCE_DOC)
+ $(MAKE) -C doc html
+
+doc/docs.zip: doc/html/genindex.html
+ (cd doc/html && zip -r ../docs.zip *)
+
+clean:
+ rm -rf build
+ $(MAKE) -C doc clean
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..c6f9ab1
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,1432 @@
+Current release
+---------------
+
+What's new in psycopg 2.9.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Fix regression with named `sql.Placeholder` (:ticket:`1291`).
+
+
+What's new in psycopg 2.9
+-------------------------
+
+- ``with connection`` starts a transaction on autocommit transactions too
+ (:ticket:`#941`).
+- Timezones with fractional minutes are supported on Python 3.7 and following
+ (:ticket:`#1272`).
+- Escape table and column names in `~cursor.copy_from()` and
+ `~cursor.copy_to()`.
+- Connection exceptions with sqlstate ``08XXX`` reclassified as
+ `~psycopg2.OperationalError` (a subclass of the previously used
+ `~psycopg2.DatabaseError`) (:ticket:`#1148`).
+- Include library dirs required from libpq to work around MacOS build problems
+ (:ticket:`#1200`).
+
+Other changes:
+
+- Dropped support for Python 2.7, 3.4, 3.5 (:tickets:`#1198, #1000, #1197`).
+- Dropped support for mx.DateTime.
+- Use `datetime.timezone` objects by default in datetime objects instead of
+ `~psycopg2.tz.FixedOffsetTimezone`.
+- The `psycopg2.tz` module is deprecated and scheduled to be dropped in the
+ next major release.
+- Provide :pep:`599` wheels packages (manylinux2014 tag) for i686 and x86_64
+ platforms.
+- Provide :pep:`600` wheels packages (manylinux_2_24 tag) for aarch64 and
+ ppc64le platforms.
+- Wheel package compiled against OpenSSL 1.1.1k and PostgreSQL 13.3.
+- Build system for Linux/MacOS binary packages moved to GitHub Actions.
+
+
+What's new in psycopg 2.8.7
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Accept empty params as `~psycopg2.connect()` (:ticket:`#1250`).
+- Fix attributes refcount in `Column` initialisation (:ticket:`#1252`).
+- Allow re-initialisation of static variables in the C module (:ticket:`#1267`).
+
+
+What's new in psycopg 2.8.6
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed memory leak changing connection encoding to the current one
+ (:ticket:`#1101`).
+- Fixed search of mxDateTime headers in virtualenvs (:ticket:`#996`).
+- Added missing values from errorcodes (:ticket:`#1133`).
+- `cursor.query` reports the query of the last :sql:`COPY` operation too
+ (:ticket:`#1141`).
+- `~psycopg2.errorcodes` map and `~psycopg2.errors` classes updated to
+ PostgreSQL 13.
+- Added wheel packages for ARM architecture (:ticket:`#1125`).
+- Wheel package compiled against OpenSSL 1.1.1g.
+
+
+What's new in psycopg 2.8.5
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed use of `!connection_factory` and `!cursor_factory` together
+ (:ticket:`#1019`).
+- Added support for `~logging.LoggerAdapter` in
+ `~psycopg2.extras.LoggingConnection` (:ticket:`#1026`).
+- `~psycopg2.extensions.Column` objects in `cursor.description` can be sliced
+ (:ticket:`#1034`).
+- Added AIX support (:ticket:`#1061`).
+- Fixed `~copy.copy()` of `~psycopg2.extras.DictCursor` rows (:ticket:`#1073`).
+
+
+What's new in psycopg 2.8.4
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed building with Python 3.8 (:ticket:`#854`).
+- Don't swallow keyboard interrupts on connect when a password is specified
+ in the connection string (:ticket:`#898`).
+- Don't advance replication cursor when the message wasn't confirmed
+ (:ticket:`#940`).
+- Fixed inclusion of ``time.h`` on linux (:ticket:`#951`).
+- Fixed int overflow for large values in `~psycopg2.extensions.Column.table_oid`
+ and `~psycopg2.extensions.Column.type_code` (:ticket:`#961`).
+- `~psycopg2.errorcodes` map and `~psycopg2.errors` classes updated to
+ PostgreSQL 12.
+- Wheel package compiled against OpenSSL 1.1.1d and PostgreSQL at least 11.4.
+
+
+What's new in psycopg 2.8.3
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Added *interval_status* parameter to
+ `~psycopg2.extras.ReplicationCursor.start_replication()` method and other
+ facilities to send automatic replication keepalives at periodic intervals
+ (:ticket:`#913`).
+- Fixed namedtuples caching introduced in 2.8 (:ticket:`#928`).
+
+
+What's new in psycopg 2.8.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed `~psycopg2.extras.RealDictCursor` when there are repeated columns
+ (:ticket:`#884`).
+- Binary packages built with openssl 1.1.1b. Should fix concurrency problems
+ (:tickets:`#543, #836`).
+
+
+What's new in psycopg 2.8.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed `~psycopg2.extras.RealDictRow` modifiability (:ticket:`#886`).
+- Fixed "there's no async cursor" error polling a connection with no cursor
+ (:ticket:`#887`).
+
+
+What's new in psycopg 2.8
+-------------------------
+
+New features:
+
+- Added `~psycopg2.errors` module. Every PostgreSQL error is converted into
+ a specific exception class (:ticket:`#682`).
+- Added `~psycopg2.extensions.encrypt_password()` function (:ticket:`#576`).
+- Added `~psycopg2.extensions.BYTES` adapter to manage databases with mixed
+ encodings on Python 3 (:ticket:`#835`).
+- Added `~psycopg2.extensions.Column.table_oid` and
+ `~psycopg2.extensions.Column.table_column` attributes on `cursor.description`
+ items (:ticket:`#661`).
+- Added `connection.info` object to retrieve various PostgreSQL connection
+ information (:ticket:`#726`).
+- Added `~connection.get_native_connection()` to expose the raw ``PGconn``
+ structure to C extensions via Capsule (:ticket:`#782`).
+- Added `~connection.pgconn_ptr` and `~cursor.pgresult_ptr` to expose raw
+ C structures to Python and interact with libpq via ctypes (:ticket:`#782`).
+- `~psycopg2.sql.Identifier` can represent qualified names in SQL composition
+ (:ticket:`#732`).
+- Added `!ReplicationCursor`.\ `~psycopg2.extras.ReplicationCursor.wal_end`
+ attribute (:ticket:`#800`).
+- Added *fetch* parameter to `~psycopg2.extras.execute_values()` function
+ (:ticket:`#813`).
+- `!str()` on `~psycopg2.extras.Range` produces a human-readable representation
+ (:ticket:`#773`).
+- `~psycopg2.extras.DictCursor` and `~psycopg2.extras.RealDictCursor` rows
+ maintain columns order (:ticket:`#177`).
+- Added `~psycopg2.extensions.Diagnostics.severity_nonlocalized` attribute on
+ the `~psycopg2.extensions.Diagnostics` object (:ticket:`#783`).
+- More efficient `~psycopg2.extras.NamedTupleCursor` (:ticket:`#838`).
+
+Bug fixes:
+
+- Fixed connections occasionally broken by the unrelated use of the
+ multiprocessing module (:ticket:`#829`).
+- Fixed async communication blocking if results are returned in different
+ chunks, e.g. with notices interspersed to the results (:ticket:`#856`).
+- Fixed adaptation of numeric subclasses such as `~enum.IntEnum`
+ (:ticket:`#591`).
+
+Other changes:
+
+- Dropped support for Python 2.6, 3.2, 3.3.
+- Dropped `psycopg1` module.
+- Dropped deprecated `!register_tstz_w_secs()` (was previously a no-op).
+- Dropped deprecated `!PersistentConnectionPool`. This pool class was mostly
+ designed to interact with Zope. Use `!ZPsycopgDA.pool` instead.
+- Binary packages no longer installed by default. The 'psycopg2-binary'
+ package must be used explicitly.
+- Dropped `!PSYCOPG_DISPLAY_SIZE` build parameter.
+- Dropped support for mxDateTime as the default date and time adapter.
+ mxDatetime support continues to be available as an alternative to Python's
+ builtin datetime.
+- No longer use 2to3 during installation for Python 2 & 3 compatibility. All
+ source files are now compatible with Python 2 & 3 as is.
+- The `!psycopg2.test` package is no longer installed by ``python setup.py
+ install``.
+- Wheel package compiled against OpenSSL 1.0.2r and PostgreSQL 11.2 libpq.
+
+
+What's new in psycopg 2.7.7
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Cleanup of the cursor results assignment code, which might have solved
+ double free and inconsistencies in concurrent usage (:tickets:`#346, #384`).
+- Wheel package compiled against OpenSSL 1.0.2q.
+
+
+What's new in psycopg 2.7.6.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed binary package broken on OS X 10.12 (:ticket:`#807`).
+- Wheel package compiled against PostgreSQL 11.1 libpq.
+
+
+What's new in psycopg 2.7.6
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Close named cursors if exist, even if `~cursor.execute()` wasn't called
+ (:ticket:`#746`).
+- Fixed building on modern FreeBSD versions with Python 3.7 (:ticket:`#755`).
+- Fixed hang trying to :sql:`COPY` via `~cursor.execute()` in asynchronous
+ connections (:ticket:`#781`).
+- Fixed adaptation of arrays of empty arrays (:ticket:`#788`).
+- Fixed segfault accessing the connection's `~connection.readonly` and
+ `~connection.deferrable` attributes repeatedly (:ticket:`#790`).
+- `~psycopg2.extras.execute_values()` accepts `~psycopg2.sql.Composable`
+ objects (:ticket:`#794`).
+- `~psycopg2.errorcodes` map updated to PostgreSQL 11.
+- Wheel package compiled against PostgreSQL 10.5 libpq and OpenSSL 1.0.2p.
+
+
+What's new in psycopg 2.7.5
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Allow non-ascii chars in namedtuple fields (regression introduced fixing
+ :ticket:`#211`).
+- Fixed adaptation of arrays of arrays of nulls (:ticket:`#325`).
+- Fixed building on Solaris 11 and derivatives such as SmartOS and illumos
+ (:ticket:`#677`).
+- Maybe fixed building on MSYS2 (as reported in :ticket:`#658`).
+- Allow string subclasses in connection and other places (:ticket:`#679`).
+- Don't raise an exception closing an unused named cursor (:ticket:`#716`).
+- Wheel package compiled against PostgreSQL 10.4 libpq and OpenSSL 1.0.2o.
+
+
+What's new in psycopg 2.7.4
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Moving away from installing the wheel package by default.
+ Packages installed from wheel raise a warning on import. Added package
+ ``psycopg2-binary`` to install from wheel instead (:ticket:`#543`).
+- Convert fields names into valid Python identifiers in
+ `~psycopg2.extras.NamedTupleCursor` (:ticket:`#211`).
+- Fixed Solaris 10 support (:ticket:`#532`).
+- `cursor.mogrify()` can be called on closed cursors (:ticket:`#579`).
+- Fixed setting session characteristics in corner cases on autocommit
+ connections (:ticket:`#580`).
+- Fixed `~psycopg2.extras.MinTimeLoggingCursor` on Python 3 (:ticket:`#609`).
+- Fixed parsing of array of points as floats (:ticket:`#613`).
+- Fixed `~psycopg2.__libpq_version__` building with libpq >= 10.1
+ (:ticket:`#632`).
+- Fixed `~cursor.rowcount` after `~cursor.executemany()` with :sql:`RETURNING`
+ statements (:ticket:`#633`).
+- Fixed compatibility problem with pypy3 (:ticket:`#649`).
+- Wheel packages compiled against PostgreSQL 10.1 libpq and OpenSSL 1.0.2n.
+- Wheel packages for Python 2.6 no more available (support dropped from
+ wheel building infrastructure).
+
+
+What's new in psycopg 2.7.3.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Wheel package compiled against PostgreSQL 10.0 libpq and OpenSSL 1.0.2l
+ (:tickets:`#601, #602`).
+
+
+What's new in psycopg 2.7.3.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Dropped libresolv from wheel package to avoid incompatibility with
+ glibc 2.26 (wheels ticket #2).
+
+
+What's new in psycopg 2.7.3
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Restored default :sql:`timestamptz[]` typecasting to Python `!datetime`.
+ Regression introduced in Psycopg 2.7.2 (:ticket:`#578`).
+
+
+What's new in psycopg 2.7.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed inconsistent state in externally closed connections
+ (:tickets:`#263, #311, #443`). Was fixed in 2.6.2 but not included in
+ 2.7 by mistake.
+- Fixed Python exceptions propagation in green callback (:ticket:`#410`).
+- Don't display the password in `connection.dsn` when the connection
+ string is specified as an URI (:ticket:`#528`).
+- Return objects with timezone parsing "infinity" :sql:`timestamptz`
+ (:ticket:`#536`).
+- Dropped dependency on VC9 runtime on Windows binary packages
+ (:ticket:`#541`).
+- Fixed segfault in `~connection.lobject()` when *mode*\=\ `!None`
+ (:ticket:`#544`).
+- Fixed `~connection.lobject()` keyword argument *lobject_factory*
+ (:ticket:`#545`).
+- Fixed `~psycopg2.extras.ReplicationCursor.consume_stream()`
+ *keepalive_interval* argument (:ticket:`#547`).
+- Maybe fixed random import error on Python 3.6 in multiprocess
+ environment (:ticket:`#550`).
+- Fixed random `!SystemError` upon receiving abort signal (:ticket:`#551`).
+- Accept `~psycopg2.sql.Composable` objects in
+ `~psycopg2.extras.ReplicationCursor.start_replication_expert()`
+ (:ticket:`#554`).
+- Parse intervals returned as microseconds from Redshift (:ticket:`#558`).
+- Added `~psycopg2.extras.Json` `!prepare()` method to consider connection
+ params when adapting (:ticket:`#562`).
+- `~psycopg2.errorcodes` map updated to PostgreSQL 10 beta 1.
+
+
+What's new in psycopg 2.7.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Ignore `!None` arguments passed to `~psycopg2.connect()` and
+ `~psycopg2.extensions.make_dsn()` (:ticket:`#517`).
+- OpenSSL upgraded from major version 0.9.8 to 1.0.2 in the Linux wheel
+ packages (:ticket:`#518`).
+- Fixed build with libpq versions < 9.3 (:ticket:`#520`).
+
+
+What's new in psycopg 2.7
+-------------------------
+
+New features:
+
+- Added `~psycopg2.sql` module to generate SQL dynamically (:ticket:`#308`).
+- Added :ref:`replication-support` (:ticket:`#322`). Main authors are
+ Oleksandr Shulgin and Craig Ringer, who deserve a huge thank you.
+- Added `~psycopg2.extensions.parse_dsn()` and
+ `~psycopg2.extensions.make_dsn()` functions (:tickets:`#321, #363`).
+ `~psycopg2.connect()` now can take both *dsn* and keyword arguments, merging
+ them together.
+- Added `~psycopg2.__libpq_version__` and
+ `~psycopg2.extensions.libpq_version()` to inspect the version of the
+ ``libpq`` library the module was compiled/loaded with
+ (:tickets:`#35, #323`).
+- The attributes `~connection.notices` and `~connection.notifies` can be
+ customized replacing them with any object exposing an `!append()` method
+ (:ticket:`#326`).
+- Adapt network types to `ipaddress` objects when available. When not
+ enabled, convert arrays of network types to lists by default. The old `!Inet`
+ adapter is deprecated (:tickets:`#317, #343, #387`).
+- Added `~psycopg2.extensions.quote_ident()` function (:ticket:`#359`).
+- Added `~connection.get_dsn_parameters()` connection method (:ticket:`#364`).
+- `~cursor.callproc()` now accepts a dictionary of parameters (:ticket:`#381`).
+- Give precedence to `!__conform__()` over superclasses to choose an object
+ adapter (:ticket:`#456`).
+- Using Python C API decoding functions and codecs caching for faster
+ unicode encoding/decoding (:ticket:`#473`).
+- `~cursor.executemany()` slowness addressed by
+ `~psycopg2.extras.execute_batch()` and `~psycopg2.extras.execute_values()`
+ (:ticket:`#491`).
+- Added ``async_`` as an alias for ``async`` to support Python 3.7 where
+ ``async`` will become a keyword (:ticket:`#495`).
+- Unless in autocommit, do not use :sql:`default_transaction_*` settings to
+ control the session characteristics as it may create problems with external
+ connection pools such as pgbouncer; use :sql:`BEGIN` options instead
+ (:ticket:`#503`).
+- `~connection.isolation_level` is now writable and entirely separated from
+ `~connection.autocommit`; added `~connection.readonly`,
+ `~connection.deferrable` writable attributes.
+
+Bug fixes:
+
+- Throw an exception trying to pass ``NULL`` chars as parameters
+ (:ticket:`#420`).
+- Fixed error caused by missing decoding `~psycopg2.extras.LoggingConnection`
+ (:ticket:`#483`).
+- Fixed integer overflow in :sql:`interval` seconds (:ticket:`#512`).
+- Make `~psycopg2.extras.Range` objects picklable (:ticket:`#462`).
+- Fixed version parsing and building with PostgreSQL 10 (:ticket:`#489`).
+
+Other changes:
+
+- Dropped support for Python 2.5 and 3.1.
+- Dropped support for client library older than PostgreSQL 9.1 (but older
+ server versions are still supported).
+- `~connection.isolation_level` doesn't read from the database but will return
+ `~psycopg2.extensions.ISOLATION_LEVEL_DEFAULT` if no value was set on the
+ connection.
+- Empty arrays no more converted into lists if they don't have a type attached
+ (:ticket:`#506`)
+
+
+What's new in psycopg 2.6.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed inconsistent state in externally closed connections
+ (:tickets:`#263, #311, #443`).
+- Report the server response status on errors (such as :ticket:`#281`).
+- Raise `!NotSupportedError` on unhandled server response status
+ (:ticket:`#352`).
+- Allow overriding string adapter encoding with no connection (:ticket:`#331`).
+- The `~psycopg2.extras.wait_select` callback allows interrupting a
+ long-running query in an interactive shell using :kbd:`Ctrl-C`
+ (:ticket:`#333`).
+- Fixed `!PersistentConnectionPool` on Python 3 (:ticket:`#348`).
+- Fixed segfault on `repr()` of an uninitialized connection (:ticket:`#361`).
+- Allow adapting bytes using `~psycopg2.extensions.QuotedString` on Python 3
+ (:ticket:`#365`).
+- Added support for setuptools/wheel (:ticket:`#370`).
+- Fix build on Windows with Python 3.5, VS 2015 (:ticket:`#380`).
+- Fixed `!errorcodes.lookup` initialization thread-safety (:ticket:`#382`).
+- Fixed `!read()` exception propagation in copy_from (:ticket:`#412`).
+- Fixed possible NULL TZ decref (:ticket:`#424`).
+- `~psycopg2.errorcodes` map updated to PostgreSQL 9.5.
+
+
+What's new in psycopg 2.6.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Lists consisting of only `None` are escaped correctly (:ticket:`#285`).
+- Fixed deadlock in multithread programs using OpenSSL (:ticket:`#290`).
+- Correctly unlock the connection after error in flush (:ticket:`#294`).
+- Fixed `!MinTimeLoggingCursor.callproc()` (:ticket:`#309`).
+- Added support for MSVC 2015 compiler (:ticket:`#350`).
+
+
+What's new in psycopg 2.6
+-------------------------
+
+New features:
+
+- Added support for large objects larger than 2GB. Many thanks to Blake Rouse
+ and the MAAS Team for the feature development.
+- Python `time` objects with a tzinfo specified and PostgreSQL :sql:`timetz`
+ data are converted into each other (:ticket:`#272`).
+
+Bug fixes:
+
+- Json adapter's `!str()` returns the adapted content instead of the `!repr()`
+ (:ticket:`#191`).
+
+
+What's new in psycopg 2.5.5
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Named cursors used as context manager don't swallow the exception on exit
+ (:ticket:`#262`).
+- `cursor.description` can be pickled (:ticket:`#265`).
+- Propagate read error messages in COPY FROM (:ticket:`#270`).
+- PostgreSQL time 24:00 is converted to Python 00:00 (:ticket:`#278`).
+
+
+What's new in psycopg 2.5.4
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Added :sql:`jsonb` support for PostgreSQL 9.4 (:ticket:`#226`).
+- Fixed segfault if COPY statements are passed to `~cursor.execute()` instead
+ of using the proper methods (:ticket:`#219`).
+- Force conversion of pool arguments to integer to avoid potentially unbounded
+ pools (:ticket:`#220`).
+- Cursors :sql:`WITH HOLD` don't begin a new transaction upon move/fetch/close
+ (:ticket:`#228`).
+- Cursors :sql:`WITH HOLD` can be used in autocommit (:ticket:`#229`).
+- `~cursor.callproc()` doesn't silently ignore an argument without a length.
+- Fixed memory leak with large objects (:ticket:`#256`).
+- Make sure the internal ``_psycopg.so`` module can be imported stand-alone (to
+ allow modules juggling such as the one described in :ticket:`#201`).
+
+
+What's new in psycopg 2.5.3
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Work around `pip issue #1630 <https://github.com/pypa/pip/issues/1630>`__
+ making installation via ``pip -e git+url`` impossible (:ticket:`#18`).
+- Copy operations correctly set the `cursor.rowcount` attribute
+ (:ticket:`#180`).
+- It is now possible to call `get_transaction_status()` on closed connections.
+- Fixed unsafe access to object names causing assertion failures in
+ Python 3 debug builds (:ticket:`#188`).
+- Mark the connection closed if found broken on `poll()` (from :ticket:`#192`
+  discussion).
+- Fixed handling of dsn and closed attributes in connection subclasses
+ failing to connect (from :ticket:`#192` discussion).
+- Added arbitrary but stable order to `Range` objects, thanks to
+ Chris Withers (:ticket:`#193`).
+- Avoid blocking async connections on connect (:ticket:`#194`). Thanks to
+ Adam Petrovich for the bug report and diagnosis.
+- Don't segfault using poorly defined cursor subclasses which forgot to call
+ the superclass init (:ticket:`#195`).
+- Mark the connection closed when a Socket connection is broken, as it
+ happens for TCP connections instead (:ticket:`#196`).
+- Fixed overflow opening a lobject with an oid not fitting in a signed int
+ (:ticket:`#203`).
+- Fixed handling of explicit default ``cursor_factory=None`` in
+ `connection.cursor()` (:ticket:`#210`).
+- Fixed possible segfault in named cursors creation.
+- Fixed debug build on Windows, thanks to James Emerton.
+
+
+What's new in psycopg 2.5.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed segfault pickling the exception raised on connection error
+ (:ticket:`#170`).
+- Meaningful connection errors report a meaningful message, thanks to
+ Alexey Borzenkov (:ticket:`#173`).
+- Manually creating `lobject` with the wrong parameter doesn't segfault
+ (:ticket:`#187`).
+
+
+What's new in psycopg 2.5.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed build on Solaris 10 and 11 where the round() function is already
+ declared (:ticket:`#146`).
+- Fixed comparison of `Range` with non-range objects (:ticket:`#164`).
+ Thanks to Chris Withers for the patch.
+- Fixed double-free on connection dealloc (:ticket:`#166`). Thanks to
+ Gangadharan S.A. for the report and fix suggestion.
+
+
+What's new in psycopg 2.5
+-------------------------
+
+New features:
+
+- Added :ref:`JSON adaptation <adapt-json>`.
+- Added :ref:`support for PostgreSQL 9.2 range types <adapt-range>`.
+- `connection` and `cursor` objects can be used in ``with`` statements
+ as context managers as specified by recent |DBAPI|_ extension.
+- Added `~psycopg2.extensions.Diagnostics` object to get extended info
+ from a database error. Many thanks to Matthew Woodcraft for the
+ implementation (:ticket:`#149`).
+- Added `connection.cursor_factory` attribute to customize the default
+ object returned by `~connection.cursor()`.
+- Added support for backward scrollable cursors. Thanks to Jon Nelson
+ for the initial patch (:ticket:`#108`).
+- Added a simple way to :ref:`customize casting of composite types
+ <adapt-composite>` into Python objects other than namedtuples.
+ Many thanks to Ronan Dunklau and Tobias Oberstein for the feature
+ development.
+- `connection.reset()` implemented using :sql:`DISCARD ALL` on server
+ versions supporting it.
+
+Bug fixes:
+
+- Properly cleanup memory of broken connections (:ticket:`#148`).
+- Fixed bad interaction of ``setup.py`` with other dependencies in
+ Distribute projects on Python 3 (:ticket:`#153`).
+
+Other changes:
+
+- Added support for Python 3.3.
+- Dropped support for Python 2.4. Please use Psycopg 2.4.x if you need it.
+- `~psycopg2.errorcodes` map updated to PostgreSQL 9.2.
+- Dropped Zope adapter from source repository. ZPsycopgDA now has its own
+ project at <https://github.com/psycopg/ZPsycopgDA>.
+
+
+What's new in psycopg 2.4.6
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed 'cursor()' arguments propagation in connection subclasses
+ and overriding of the 'cursor_factory' argument. Thanks to
+ Corry Haines for the report and the initial patch (:ticket:`#105`).
+- Dropped GIL release during string adaptation around a function call
+ invoking a Python API function, which could cause interpreter crash.
+ Thanks to Manu Cupcic for the report (:ticket:`#110`).
+- Close a green connection if there is an error in the callback.
+ Maybe a harsh solution but it leaves the program responsive
+ (:ticket:`#113`).
+- 'register_hstore()', 'register_composite()', 'tpc_recover()' work with
+ RealDictConnection and Cursor (:ticket:`#114`).
+- Fixed broken pool for Zope and connections re-init across ZSQL methods
+ in the same request (:tickets:`#123, #125, #142`).
+- connect() raises an exception instead of swallowing keyword arguments
+ when a connection string is specified as well (:ticket:`#131`).
+- Discard any result produced by 'executemany()' (:ticket:`#133`).
+- Fixed pickling of FixedOffsetTimezone objects (:ticket:`#135`).
+- Release the GIL around PQgetResult calls after COPY (:ticket:`#140`).
+- Fixed empty strings handling in composite caster (:ticket:`#141`).
+- Fixed pickling of DictRow and RealDictRow objects.
+
+
+What's new in psycopg 2.4.5
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- The close() methods on connections and cursors don't raise exceptions
+ if called on already closed objects.
+- Fixed fetchmany() with no argument in cursor subclasses
+ (:ticket:`#84`).
+- Use lo_creat() instead of lo_create() when possible for better
+ interaction with pgpool-II (:ticket:`#88`).
+- Error and its subclasses are picklable, useful for multiprocessing
+ interaction (:ticket:`#90`).
+- Better efficiency and formatting of timezone offset objects thanks
+ to Menno Smits (:tickets:`#94, #95`).
+- Fixed 'rownumber' during iteration on cursor subclasses.
+ Regression introduced in 2.4.4 (:ticket:`#100`).
+- Added support for 'inet' arrays.
+- Fixed 'commit()' concurrency problem (:ticket:`#103`).
+- Codebase cleaned up using the GCC Python plugin's static analysis
+ tool, which has revealed several unchecked return values, possible
+ NULL dereferences, reference counting problems. Many thanks to David
+ Malcolm for the useful tool and the assistance provided using it.
+
+
+What's new in psycopg 2.4.4
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- 'register_composite()' also works with the types implicitly defined
+ after a table row, not only with the ones created by 'CREATE TYPE'.
+- Values for the isolation level symbolic constants restored to what
+ they were before release 2.4.2 to avoid breaking apps using the
+ values instead of the constants.
+- Named DictCursor/RealDictCursor honour itersize (:ticket:`#80`).
+- Fixed rollback on error on Zope (:ticket:`#73`).
+- Raise 'DatabaseError' instead of 'Error' with empty libpq errors,
+ consistently with other disconnection-related errors: regression
+ introduced in release 2.4.1 (:ticket:`#82`).
+
+
+What's new in psycopg 2.4.3
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- connect() supports all the keyword arguments supported by the
+ database
+- Added 'new_array_type()' function for easy creation of array
+ typecasters.
+- Added support for arrays of hstores and composite types (:ticket:`#66`).
+- Fixed segfault in case of transaction started with connection lost
+ (and possibly other events).
+- Fixed adaptation of Decimal type in sub-interpreters, such as in
+ certain mod_wsgi configurations (:ticket:`#52`).
+- Rollback connections in transaction or in error before putting them
+ back into a pool. Also discard broken connections (:ticket:`#62`).
+- Lazy import of the slow uuid module, thanks to Marko Kreen.
+- Fixed NamedTupleCursor.executemany() (:ticket:`#65`).
+- Fixed --static-libpq setup option (:ticket:`#64`).
+- Fixed interaction between RealDictCursor and named cursors
+ (:ticket:`#67`).
+- Dropped limit on the columns length in COPY operations (:ticket:`#68`).
+- Fixed reference leak with arguments referenced more than once
+ in queries (:ticket:`#81`).
+- Fixed typecasting of arrays containing consecutive backslashes.
+- 'errorcodes' map updated to PostgreSQL 9.1.
+
+
+What's new in psycopg 2.4.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Added 'set_session()' method and 'autocommit' property to the
+ connection. Added support for read-only sessions and, for PostgreSQL
+ 9.1, for the "repeatable read" isolation level and the "deferrable"
+ transaction property.
+- Psycopg doesn't execute queries at connection time to find the
+ default isolation level.
+- Fixed bug with multithread code potentially causing loss of sync
+ with the server communication or lock of the client (:ticket:`#55`).
+- Don't fail import if mx.DateTime module can't be found, even if its
+ support was built (:ticket:`#53`).
+- Fixed escape for negative numbers prefixed by minus operator
+ (:ticket:`#57`).
+- Fixed refcount issue during copy. Reported and fixed by Dave
+ Malcolm (:ticket:`#58`, Red Hat Bug 711095).
+- Trying to execute concurrent operations on the same connection
+ through concurrent green thread results in an error instead of a
+ deadlock.
+- Fixed detection of pg_config on Windows. Report and fix, plus some
+ long needed setup.py cleanup by Steve Lacy: thanks!
+
+
+What's new in psycopg 2.4.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Use own parser for bytea output, not requiring anymore the libpq 9.0
+ to parse the hex format.
+- Don't fail connection if the client encoding is a non-normalized
+ variant. Issue reported by Peter Eisentraut.
+- Correctly detect an empty query sent to the backend (:ticket:`#46`).
+- Fixed a SystemError clobbering libpq errors raised without SQLSTATE.
+ Bug vivisectioned by Eric Snow.
+- Fixed interaction between NamedTuple and server-side cursors.
+- Allow to specify --static-libpq on setup.py command line instead of
+ just in 'setup.cfg'. Patch provided by Matthew Ryan (:ticket:`#48`).
+
+
+What's new in psycopg 2.4
+-------------------------
+
+New features and changes:
+
+- Added support for Python 3.1 and 3.2. The conversion has also
+ brought several improvements:
+
+ - Added 'b' and 't' mode to large objects: write can deal with both
+ bytes strings and unicode; read can return either bytes strings
+ or decoded unicode.
+ - COPY sends Unicode data to files implementing 'io.TextIOBase'.
+ - Improved PostgreSQL-Python encodings mapping.
+ - Added a few missing encodings: EUC_CN, EUC_JIS_2004, ISO885910,
+ ISO885916, LATIN10, SHIFT_JIS_2004.
+ - Dropped repeated dictionary lookups with unicode query/parameters.
+
+- Improvements to the named cursors:
+
+ - More efficient iteration on named cursors, fetching 'itersize'
+ records at time from the backend.
+ - The named cursors name can be an invalid identifier.
+
+- Improvements in data handling:
+
+ - Added 'register_composite()' function to cast PostgreSQL
+ composite types into Python tuples/namedtuples.
+ - Adapt types 'bytearray' (from Python 2.6), 'memoryview' (from
+ Python 2.7) and other objects implementing the "Revised Buffer
+ Protocol" to 'bytea' data type.
+ - The 'hstore' adapter can work even when the data type is not
+ installed in the 'public' namespace.
+ - Raise a clean exception instead of returning bad data when
+ receiving bytea in 'hex' format and the client libpq can't parse
+ them.
+ - Empty lists correctly roundtrip Python -> PostgreSQL -> Python.
+
+- Other changes:
+
+ - 'cursor.description' is provided as named tuples if available.
+ - The build script refuses to guess values if 'pg_config' is not
+ found.
+ - Connections and cursors are weakly referenceable.
+
+Bug fixes:
+
+- Fixed adaptation of None in composite types (:ticket:`#26`). Bug
+ report by Karsten Hilbert.
+- Fixed several reference leaks in less common code paths.
+- Fixed segfault when a large object is closed and its connection no
+ more available.
+- Added missing icon to ZPsycopgDA package, not available in Zope
+ 2.12.9 (:ticket:`#30`). Bug report and patch by Pumukel.
+- Fixed conversion of negative infinity (:ticket:`#40`). Bug report and
+ patch by Marti Raudsepp.
+
+
+What's new in psycopg 2.3.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed segfault with middleware not passing DateStyle to the client
+ (:ticket:`#24`). Bug report and patch by Marti Raudsepp.
+
+
+What's new in psycopg 2.3.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Fixed build problem on CentOS 5.5 x86_64 (:ticket:`#23`).
+
+
+What's new in psycopg 2.3
+-------------------------
+
+psycopg 2.3 aims to expose some new features introduced in PostgreSQL 9.0.
+
+Main new features:
+
+- `dict` to `hstore` adapter and `hstore` to `dict` typecaster, using both
+ 9.0 and pre-9.0 syntax.
+- Two-phase commit protocol support as per DBAPI specification.
+- Support for payload in notifications received from the backend.
+- `namedtuple`-returning cursor.
+- Query execution cancel.
+
+Other features and changes:
+
+- Dropped support for protocol 2: Psycopg 2.3 can only connect to PostgreSQL
+ servers with version at least 7.4.
+- Don't issue a query at every connection to detect the client encoding
+ and to set the datestyle to ISO if it is already compatible with what
+ expected.
+- `mogrify()` now supports unicode queries.
+- Subclasses of a type that can be adapted are adapted as the superclass.
+- `errorcodes` knows a couple of new codes introduced in PostgreSQL 9.0.
+- Dropped deprecated Psycopg "own quoting".
+- Never issue a ROLLBACK on close/GC. This behaviour was introduced as a bug
+ in release 2.2, but trying to send a command while being destroyed has been
+ considered not safe.
+
+Bug fixes:
+
+- Fixed use of `PQfreemem` instead of `free` in binary typecaster.
+- Fixed access to freed memory in `conn_get_isolation_level()`.
+- Fixed crash during Decimal adaptation with a few 2.5.x Python versions
+ (:ticket:`#7`).
+- Fixed notices order (:ticket:`#9`).
+
+
+What's new in psycopg 2.2.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Bug fixes:
+
+- the call to logging.basicConfig() in pool.py has been dropped: it was
+ messing with some projects using logging (and a library should not
+ initialize the logging system anyway.)
+- psycopg now correctly handles time zones with seconds in the UTC offset.
+ The old register_tstz_w_secs() function is deprecated and will raise a
+ warning if called.
+- Exceptions raised by the column iterator are propagated.
+- Exceptions raised by executemany() iterators are propagated.
+
+
+What's new in psycopg 2.2.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Bug fixes:
+
+- psycopg now builds again on MS Windows.
+
+
+What's new in psycopg 2.2
+-------------------------
+
+This is the first release of the new 2.2 series, supporting not just one but
+two different ways of executing asynchronous queries, thanks to Jan and Daniele
+(with a little help from me and others, but they did 99% of the work so they
+deserve their names here in the news.)
+
+psycopg now supports both classic select() loops and "green" coroutine
+libraries. It is all in the documentation, so just point your browser to
+doc/html/advanced.html.
+
+Other new features:
+
+- truncate() method for lobjects.
+- COPY functions are now a little bit faster.
+- All builtin PostgreSQL to Python typecasters are now available from the
+ psycopg2.extensions module.
+- Notifications from the backend are now available right after the execute()
+ call (before client code needed to call isbusy() to ensure NOTIFY
+ reception.)
+- Better timezone support.
+- Lots of documentation updates.
+
+Bug fixes:
+
+- Fixed some gc/refcounting problems.
+- Fixed reference leak in NOTIFY reception.
+- Fixed problem with PostgreSQL not casting string literals to the correct
+ types in some situations: psycopg now add an explicit cast to dates, times
+ and bytea representations.
+- Fixed TimestampFromTicks() and TimeFromTicks() for seconds >= 59.5.
+- Fixed spurious exception raised when calling C typecasters from Python
+ ones.
+
+
+What's new in psycopg 2.0.14
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+New features:
+
+- Support for adapting tuples to PostgreSQL arrays is now enabled by
+ default and does not require importing psycopg2.extensions anymore.
+- "can't adapt" error message now includes full type information.
+- Thank to Daniele Varrazzo (piro) psycopg2's source package now includes
+ full documentation in HTML and plain text format.
+
+Bug fixes:
+
+- No loss of precision when using floats anymore.
+- decimal.Decimal "nan" and "infinity" correctly converted to PostgreSQL
+ numeric NaN values (note that PostgreSQL numeric type does not support
+ infinity but just NaNs.)
+- psycopg2.extensions now includes Binary.
+
+It seems we're good citizens of the free software ecosystem and that big
+big big companies and people ranting on the pgsql-hackers mailing list
+we'll now not dislike us. *g* (See LICENSE file for the details.)
+
+
+What's new in psycopg 2.0.13
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+New features:
+
+- Support for UUID arrays.
+- It is now possible to build psycopg linking to a static libpq
+ library.
+
+Bug fixes:
+
+- Fixed a deadlock related to using the same connection with
+ multiple cursors from different threads.
+- Builds again with MSVC.
+
+
+What's new in psycopg 2.0.12
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+New features:
+
+- The connection object now has a reset() method that can be used to
+ reset the connection to its default state.
+
+Bug fixes:
+
+- copy_to() and copy_from() now accept a much larger number of columns.
+- Fixed PostgreSQL version detection.
+- Fixed ZPsycopgDA version check.
+- Fixed regression in ZPsycopgDA that made it behave wrongly when
+ receiving serialization errors: now the query is re-issued as it
+ should be by propagating the correct exception to Zope.
+- Writing "large" large objects should now work.
+
+
+What's new in psycopg 2.0.11
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+New features:
+
+- DictRow and RealDictRow now use less memory. If you inherit on them
+  remember to set __slots__ for your new attributes or be prepared to
+ go back to old memory usage.
+
+Bug fixes:
+
+- Fixed exception in setup.py.
+- More robust detection of PostgreSQL development versions.
+- Fixed exception in RealDictCursor, introduced in 2.0.10.
+
+
+What's new in psycopg 2.0.10
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+New features:
+
+- A specialized type-caster that can parse time zones with seconds is
+ now available. Note that after enabling it (see extras.py) "wrong"
+ time zones will be parsed without raising an exception but the
+ result will be rounded.
+- DictCursor can be used as a named cursor.
+- DictRow now implements more dict methods.
+- The connection object now expose PostgreSQL server version as the
+ .server_version attribute and the protocol version used as
+ .protocol_version.
+- The connection object has a .get_parameter_status() method that
+ can be used to obtain useful information from the server.
+
+Bug fixes:
+
+- None is now correctly always adapted to NULL.
+- Two double memory free errors provoked by multithreading and
+ garbage collection are now fixed.
+- Fixed usage of internal Python code in the notice processor; this
+ should fix segfaults when receiving a lot of notices in
+ multithreaded programs.
+- Should build again on MSVC and Solaris.
+- Should build with development versions of PostgreSQL (ones with
+ -devel version string.)
+- Fixed some tests that failed even when psycopg was right.
+
+
+What's new in psycopg 2.0.9
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+New features:
+
+- "import psycopg2.extras" to get some support for handling times
+ and timestamps with seconds in the time zone offset.
+- DictCursors can now be used as named cursors.
+
+Bug fixes:
+
+- register_type() now accepts an explicit None as its second parameter.
+- psycopg2 should build again on MSVC and Solaris.
+
+
+What's new in psycopg 2.0.9
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+New features:
+
+- COPY TO/COPY FROM queries now can be of any size and psycopg will
+ correctly quote separators.
+- float values Inf and NaN are now correctly handled and can
+ round-trip to the database.
+- executemany() now returns the number of total INSERTed or UPDATEd
+ rows. Note that, as it has always been, executemany() should not
+ be used to execute multiple SELECT statements and while it will
+ execute the statements without any problem, it will return the
+ wrong value.
+- copy_from() and copy_to() can now use quoted separators.
+- "import psycopg2.extras" to get UUID support.
+
+Bug fixes:
+
+- register_type() now works on connection and cursor subclasses.
+- fixed a memory leak when using lobjects.
+
+
+What's new in psycopg 2.0.8
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+New features:
+
+- The connection object now has a get_backend_pid() method that
+ returns the current PostgreSQL connection backend process PID.
+- The PostgreSQL large object API has been exposed through the
+ Cursor.lobject() method.
+
+Bug fixes:
+
+- Some fixes to ZPsycopgDA have been merged from the Debian package.
+- A memory leak was fixed in Cursor.executemany().
+- A double free was fixed in pq_complete_error(), that caused crashes
+ under some error conditions.
+
+
+What's new in psycopg 2.0.7
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Improved error handling:
+
+- All instances of psycopg2.Error subclasses now have pgerror,
+ pgcode and cursor attributes. They will be set to None if no
+ value is available.
+- Exception classes are now chosen based on the SQLSTATE value from
+ the result. (#184)
+- The commit() and rollback() methods now set the pgerror and pgcode
+ attributes on exceptions. (#152)
+- errors from commit() and rollback() are no longer considered
+ fatal. (#194)
+- If a disconnect is detected during execute(), an exception will be
+ raised at that point rather than resulting in "ProgrammingError:
+ no results to fetch" later on. (#186)
+
+Better PostgreSQL compatibility:
+
+- If the server uses standard_conforming_strings, perform
+ appropriate quoting.
+- BC dates are now handled if psycopg is compiled with mxDateTime
+ support. If using datetime, an appropriate ValueError is
+ raised. (#203)
+
+Other bug fixes:
+
+- If multiple sub-interpreters are in use, do not share the Decimal
+ type between them. (#192)
+- Buffer objects obtained from psycopg are now accepted by psycopg
+ too, without segfaulting. (#209)
+- A few small changes were made to improve DB-API compatibility.
+ All the dbapi20 tests now pass.
+
+Miscellaneous:
+
+- The PSYCOPG_DISPLAY_SIZE option is now off by default. This means
+ that display size will always be set to "None" in
+ cursor.description. Calculating the display size was expensive,
+ and infrequently used so this should improve performance.
+- New QueryCanceledError and TransactionRollbackError exceptions
+ have been added to the psycopg2.extensions module. They can be
+ used to detect statement timeouts and deadlocks respectively.
+- Cursor objects now have a "closed" attribute. (#164)
+- If psycopg has been built with debug support, it is now necessary
+ to set the PSYCOPG_DEBUG environment variable to turn on debug
+ spew.
+
+
+What's new in psycopg 2.0.6
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Better support for PostgreSQL, Python and win32:
+
+- full support for PostgreSQL 8.2, including NULLs in arrays
+- support for almost all existing PostgreSQL encodings
+- full list of PostgreSQL error codes available by importing the
+ psycopg2.errorcodes module
+- full support for Python 2.5 and 64 bit architectures
+- better build support on win32 platform
+
+Support for per-connection type-casters (used by ZPsycopgDA too, this
+fixes a long standing bug that made different connections use a random
+set of date/time type-casters instead of the configured one.)
+
+Better management of times and dates both from Python and in Zope.
+
+copy_to and copy_from now take an extra "columns" parameter.
+
+Python tuples are now adapted to SQL sequences that can be used with
+the "IN" operator by default if the psycopg2.extensions module is
+imported (i.e., the SQL_IN adapter was moved from extras to extensions.)
+
+Fixed some small buglets and build glitches:
+
+- removed double mutex destroy
+- removed all non-constant initializers
+- fixed PyObject_HEAD declarations to avoid memory corruption
+ on 64 bit architectures
+- fixed several Python API calls to work on 64 bit architectures
+- applied compatibility macros from PEP 353
+- now using more than one argument format raise an error instead of
+ a segfault
+
+
+What's new in psycopg 2.0.5.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Now it really, really builds on MSVC and older gcc versions.
+
+What's new in psycopg 2.0.5
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Fixed various buglets such as:
+
+ - segfault when passing an empty string to Binary()
+ - segfault on null queries
+ - segfault and bad keyword naming in .executemany()
+ - OperationalError in connection objects was always None
+
+* Various changes to ZPsycopgDA to make it more zope2.9-ish.
+
+* connect() now accepts both integers and strings as port parameter
+
+What's new in psycopg 2.0.4
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Fixed float conversion bug introduced in 2.0.3.
+
+What's new in psycopg 2.0.3
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Fixed various buglets and a memory leak (see ChangeLog for details)
+
+What's new in psycopg 2.0.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Fixed a bug in array typecasting that sometimes made psycopg forget about
+ the last element in the array.
+
+* Fixed some minor buglets in string memory allocations.
+
+* Builds again with compilers different from gcc (#warning about PostgreSQL
+ version is issued only if __GCC__ is defined.)
+
+What's new in psycopg 2.0.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* ZPsycopgDA now actually loads.
+
+What's new in psycopg 2.0
+-------------------------
+
+* Fixed handle leak on win32.
+
+* If available the new "safe" encoding functions of libpq are used.
+
+* django and tinyerp people, please switch to psycopg 2 _without_
+ using a psycopg 1 compatibility layer (this release was anticipated
+ so that you all stop grumbling about psycopg 2 is still in beta.. :)
+
+What's new in psycopg 2.0 beta 7
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Ironed out last problems with times and date (should be quite solid now.)
+
+* Fixed problems with some arrays.
+
+* Slightly better ZPsycopgDA (no more double connection objects in the menu
+ and other minor fixes.)
+
+* ProgrammingError exceptions now have three extra attributes: .cursor
+ (it is possible to access the query that caused the exception using
+ error.cursor.query), .pgerror and .pgcode (PostgreSQL original error
+ text and code.)
+
+* The build system uses pg_config when available.
+
+* Documentation in the doc/ directory! (With many kudos to piro.)
+
+What's new in psycopg 2.0 beta 6
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Support for named cursors.
+
+* Safer parsing of time intervals.
+
+* Better parsing of times and dates, no more locale problems.
+
+* Should now play well with py2exe and similar tools.
+
+* The "decimal" module is now used if available under Python 2.3.
+
+What's new in psycopg 2.0 beta 5
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Fixed all known bugs.
+
+* The initial isolation level is now read from the server and
+ .set_isolation_level() now takes values defined in psycopg2.extensions.
+
+* .callproc() implemented as a SELECT of the given procedure.
+
+* Better docstrings for a few functions/methods.
+
+* Some time-related functions like psycopg2.TimeFromTicks() now take the
+ local timezone into account. Also a tzinfo object (as per datetime module
+ specifications) can be passed to the psycopg2.Time and psycopg2.Datetime
+ constructors.
+
+* All classes have been renamed to exist in the psycopg2._psycopg module,
+ to fix problems with automatic documentation generators like epydoc.
+
+* NOTIFY is correctly trapped.
+
+What's new in psycopg 2.0 beta 4
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* psycopg module is now named psycopg2.
+
+* No more segfaults when a UNICODE query can't be converted to the
+ backend encoding.
+
+* No more segfaults on empty queries.
+
+* psycopg2.connect() now takes an integer for the port keyword parameter.
+
+* "python setup.py bdist_rpm" now works.
+
+* Fixed lots of small bugs, see ChangeLog for details.
+
+What's new in psycopg 2.0 beta 3
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* ZPsycopgDA now works (except table browsing.)
+
+* psycopg builds again on Python 2.2.
+
+What's new in psycopg 2.0 beta 2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Fixed ZPsycopgDA version check (ZPsycopgDA can now be imported in
+ Zope.)
+
+* psycopg.extras.DictRow works even after a new query on the generating
+ cursor.
+
+* Better setup.py for win32 (should build with MSCV or mingw.)
+
+* Generic fixes and memory leak plugs.
+
+What's new in psycopg 2.0 beta 1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Officially in beta (i.e., no new features will be added.)
+
+* Array support: list objects can be passed as bound variables and are
+ correctly returned for array columns.
+
+* Added the psycopg.psycopg1 compatibility module (if you want instant
+ psycopg 1 compatibility just "from psycopg import psycopg1 as psycopg".)
+
+* Complete support for BYTEA columns and buffer objects.
+
+* Added error codes to error messages.
+
+* The AsIs adapter is now exported by default (also Decimal objects are
+ adapted using the AsIs adapter (when str() is called on them they
+ already format themselves using the right precision and scale.)
+
+* The connect() function now takes "connection_factory" instead of
+ "factory" as keyword argument.
+
+* New setup.py code to build on win32 using mingw and better error
+  messages on missing datetime headers.
+
+* Internal changes that allow much better user-defined type casters.
+
+* A lot of bugfixes (binary, datetime, 64 bit arches, GIL, .executemany())
+
+What's new in psycopg 1.99.13
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Added missing .executemany() method.
+
+* Optimized type cast from PostgreSQL to Python (psycopg should be even
+ faster than before.)
+
+What's new in psycopg 1.99.12
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* .rowcount should be ok and in sync with psycopg 1.
+
+* Implemented the new COPY FROM/COPY TO code when connection to the
+ backend using libpq protocol 3 (this also removes all asprintf calls:
+ build on win32 works again.) A protocol 3-enabled psycopg *can*
+ connect to an old protocol 2 database and will detect it and use the
+ right code.
+
+* getquoted() called for real by the mogrification code.
+
+What's new in psycopg 1.99.11
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* 'cursor' argument in .cursor() connection method renamed to
+ 'cursor_factory'.
+
+* changed 'tuple_factory' cursor attribute name to 'row_factory'.
+
+* the .cursor attribute is gone and connections and cursors are properly
+ gc-managed.
+
+* fixes to the async core.
+
+What's new in psycopg 1.99.10
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* The adapt() function now fully supports the adaptation protocol
+ described in PEP 246. Note that the adapters registry now is indexed
+ by (type, protocol) and not by type alone. Change your adapters
+ accordingly.
+
+* More configuration options moved from setup.py to setup.cfg.
+
+* Fixed two memory leaks: one in cursor deallocation and one in row
+ fetching (.fetchXXX() methods.)
+
+What's new in psycopg 1.99.9
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Added simple pooling code (psycopg.pool module).
+
+* Added DECIMAL typecaster to convert postgresql DECIMAL and NUMERIC
+  types (i.e., all types with an OID of NUMERICOID.) Note that the
+ DECIMAL typecaster does not set scale and precision on the created
+ objects but uses Python defaults.
+
+* ZPsycopgDA back in and working using the new pooling code.
+
+* Isn't that enough? :)
+
+What's new in psycopg 1.99.8
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* added support for UNICODE queries.
+* added UNICODE typecaster; to activate it just do::
+
+ psycopg.extensions.register_type(psycopg.extensions.UNICODE)
+
+  Note that the UNICODE typecaster overrides the STRING one, so it is
+ not activated by default.
+
+* cursors now really support the iterator protocol.
+* solved the rounding errors in time conversions.
+* now cursors support .fileno() and .isready() methods, to be used in
+ select() calls.
+* .copy_from() and .copy_in() methods are back in (still using the old
+ protocol, will be updated to use new one in next release.)
+* fixed memory corruption bug reported on win32 platform.
+
+What's new in psycopg 1.99.7
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* added support for tuple factories in cursor objects (removed factory
+ argument in favor of a .tuple_factory attribute on the cursor object);
+ see the new module psycopg.extras for a cursor (DictCursor) that
+  returns rows as objects that support indexing both by position and
+ column name.
+* added support for tzinfo objects in datetime.timestamp objects: the
+ PostgreSQL type "timestamp with time zone" is converted to
+ datetime.timestamp with a FixedOffsetTimezone initialized as necessary.
+
+What's new in psycopg 1.99.6
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* sslmode parameter from 1.1.x
+* various datetime conversion improvements.
+* now psycopg should compile without mx or without native datetime
+ (not both, obviously.)
+* included various win32/MSVC fixes (pthread.h changes, winsock2
+ library, include path in setup.py, etc.)
+* ported interval fixes from 1.1.14/1.1.15.
+* the last query executed by a cursor is now available in the
+ .query attribute.
+* conversion of unicode strings to backend encoding now uses a table
+  (that still needs to be filled.)
+* cursors now have a .mogrify() method that returns the query string
+ instead of executing it.
+* connection objects now have a .dsn read-only attribute that holds the
+ connection string.
+* moved psycopg C module to _psycopg and made psycopg a python module:
+ this allows for a neat separation of DBAPI-2.0 functionality and psycopg
+ extensions; the psycopg namespace will be also used to provide
+ python-only extensions (like the pooling code, some ZPsycopgDA support
+ functions and the like.)
+
+What's new in psycopg 1.99.3
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* added support for python 2.3 datetime types (both ways) and made datetime
+ the default set of typecasters when available.
+* added example: dt.py.
+
+What's new in psycopg 1.99.3
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* initial working support for unicode bound variables: UTF-8 and latin-1
+ backend encodings are natively supported (and the encoding.py example even
+ works!)
+* added .set_client_encoding() method on the connection object.
+* added examples: encoding.py, binary.py, lastrowid.py.
+
+What's new in psycopg 1.99.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* better typecasting:
+
+ - DateTimeDelta used for postgresql TIME (merge from 1.1)
+ - BYTEA now is converted to a real buffer object, not to a string
+
+* buffer objects are now adapted into Binary objects automatically.
+* ported scroll method from 1.1 (DBAPI-2.0 extension for cursors)
+* initial support for some DBAPI-2.0 extensions:
+
+ - .rownumber attribute for cursors
+ - .connection attribute for cursors
+ - .next() and .__iter__() methods to have cursors support the iterator
+ protocol
+ - all exception objects are exported to the connection object
+
+What's new in psycopg 1.99.1
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* implemented microprotocols to adapt arbitrary types to the interface used by
+ psycopg to bind variables in execute;
+
+* moved qstring, pboolean and mxdatetime to the new adapter layout (binary is
+ still missing; python 2.3 datetime needs to be written).
+
+
+What's new in psycopg 1.99.0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* reorganized the whole source tree;
+
+* async core is in place;
+
+* splitted QuotedString objects from mx stuff;
+
+* dropped autotools and moved to pythonic setup.py (needs work.)
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..fa88051
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,107 @@
+Metadata-Version: 1.2
+Name: psycopg2
+Version: 2.9.1
+Summary: psycopg2 - Python-PostgreSQL Database Adapter
+Home-page: https://psycopg.org/
+Author: Federico Di Gregorio
+Author-email: fog@initd.org
+Maintainer: Daniele Varrazzo
+Maintainer-email: daniele.varrazzo@gmail.org
+License: LGPL with exceptions
+Project-URL: Homepage, https://psycopg.org/
+Project-URL: Documentation, https://www.psycopg.org/docs/
+Project-URL: Code, https://github.com/psycopg/psycopg2
+Project-URL: Issue Tracker, https://github.com/psycopg/psycopg2/issues
+Project-URL: Download, https://pypi.org/project/psycopg2/
+Description: Psycopg is the most popular PostgreSQL database adapter for the Python
+ programming language. Its main features are the complete implementation of
+ the Python DB API 2.0 specification and the thread safety (several threads can
+ share the same connection). It was designed for heavily multi-threaded
+ applications that create and destroy lots of cursors and make a large number
+ of concurrent "INSERT"s or "UPDATE"s.
+
+ Psycopg 2 is mostly implemented in C as a libpq wrapper, resulting in being
+ both efficient and secure. It features client-side and server-side cursors,
+ asynchronous communication and notifications, "COPY TO/COPY FROM" support.
+ Many Python types are supported out-of-the-box and adapted to matching
+ PostgreSQL data types; adaptation can be extended and customized thanks to a
+ flexible objects adaptation system.
+
+ Psycopg 2 is both Unicode and Python 3 friendly.
+
+
+ Documentation
+ -------------
+
+ Documentation is included in the ``doc`` directory and is `available online`__.
+
+ .. __: https://www.psycopg.org/docs/
+
+ For any other resource (source code repository, bug tracker, mailing list)
+ please check the `project homepage`__.
+
+ .. __: https://psycopg.org/
+
+
+ Installation
+ ------------
+
+ Building Psycopg requires a few prerequisites (a C compiler, some development
+ packages): please check the install_ and the faq_ documents in the ``doc`` dir
+ or online for the details.
+
+ If prerequisites are met, you can install psycopg like any other Python
+ package, using ``pip`` to download it from PyPI_::
+
+ $ pip install psycopg2
+
+ or using ``setup.py`` if you have downloaded the source package locally::
+
+ $ python setup.py build
+ $ sudo python setup.py install
+
+ You can also obtain a stand-alone package, not requiring a compiler or
+ external libraries, by installing the `psycopg2-binary`_ package from PyPI::
+
+ $ pip install psycopg2-binary
+
+ The binary package is a practical choice for development and testing but in
+ production it is advised to use the package built from sources.
+
+ .. _PyPI: https://pypi.org/project/psycopg2/
+ .. _psycopg2-binary: https://pypi.org/project/psycopg2-binary/
+ .. _install: https://www.psycopg.org/docs/install.html#install-from-source
+ .. _faq: https://www.psycopg.org/docs/faq.html#faq-compile
+
+ :Linux/OSX: |gh-actions|
+ :Windows: |appveyor|
+
+ .. |gh-actions| image:: https://github.com/psycopg/psycopg2/actions/workflows/tests.yml/badge.svg
+ :target: https://github.com/psycopg/psycopg2/actions/workflows/tests.yml
+ :alt: Linux and OSX build status
+
+ .. |appveyor| image:: https://ci.appveyor.com/api/projects/status/github/psycopg/psycopg2?branch=master&svg=true
+ :target: https://ci.appveyor.com/project/psycopg/psycopg2/branch/master
+ :alt: Windows build status
+
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: C
+Classifier: Programming Language :: SQL
+Classifier: Topic :: Database
+Classifier: Topic :: Database :: Front-Ends
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: Unix
+Requires-Python: >=3.6
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..c9747da
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,73 @@
+psycopg2 - Python-PostgreSQL Database Adapter
+=============================================
+
+Psycopg is the most popular PostgreSQL database adapter for the Python
+programming language. Its main features are the complete implementation of
+the Python DB API 2.0 specification and the thread safety (several threads can
+share the same connection). It was designed for heavily multi-threaded
+applications that create and destroy lots of cursors and make a large number
+of concurrent "INSERT"s or "UPDATE"s.
+
+Psycopg 2 is mostly implemented in C as a libpq wrapper, resulting in being
+both efficient and secure. It features client-side and server-side cursors,
+asynchronous communication and notifications, "COPY TO/COPY FROM" support.
+Many Python types are supported out-of-the-box and adapted to matching
+PostgreSQL data types; adaptation can be extended and customized thanks to a
+flexible objects adaptation system.
+
+Psycopg 2 is both Unicode and Python 3 friendly.
+
+
+Documentation
+-------------
+
+Documentation is included in the ``doc`` directory and is `available online`__.
+
+.. __: https://www.psycopg.org/docs/
+
+For any other resource (source code repository, bug tracker, mailing list)
+please check the `project homepage`__.
+
+.. __: https://psycopg.org/
+
+
+Installation
+------------
+
+Building Psycopg requires a few prerequisites (a C compiler, some development
+packages): please check the install_ and the faq_ documents in the ``doc`` dir
+or online for the details.
+
+If prerequisites are met, you can install psycopg like any other Python
+package, using ``pip`` to download it from PyPI_::
+
+ $ pip install psycopg2
+
+or using ``setup.py`` if you have downloaded the source package locally::
+
+ $ python setup.py build
+ $ sudo python setup.py install
+
+You can also obtain a stand-alone package, not requiring a compiler or
+external libraries, by installing the `psycopg2-binary`_ package from PyPI::
+
+ $ pip install psycopg2-binary
+
+The binary package is a practical choice for development and testing but in
+production it is advised to use the package built from sources.
+
+.. _PyPI: https://pypi.org/project/psycopg2/
+.. _psycopg2-binary: https://pypi.org/project/psycopg2-binary/
+.. _install: https://www.psycopg.org/docs/install.html#install-from-source
+.. _faq: https://www.psycopg.org/docs/faq.html#faq-compile
+
+:Linux/OSX: |gh-actions|
+:Windows: |appveyor|
+
+.. |gh-actions| image:: https://github.com/psycopg/psycopg2/actions/workflows/tests.yml/badge.svg
+ :target: https://github.com/psycopg/psycopg2/actions/workflows/tests.yml
+ :alt: Linux and OSX build status
+
+.. |appveyor| image:: https://ci.appveyor.com/api/projects/status/github/psycopg/psycopg2?branch=master&svg=true
+ :target: https://ci.appveyor.com/project/psycopg/psycopg2/branch/master
+ :alt: Windows build status
diff --git a/doc/COPYING.LESSER b/doc/COPYING.LESSER
new file mode 100644
index 0000000..cca7fc2
--- /dev/null
+++ b/doc/COPYING.LESSER
@@ -0,0 +1,165 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000..4b2752b
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,39 @@
+.PHONY: env help clean html package doctest
+
+docs: html
+
+check: doctest
+
+# The environment is currently required to build the documentation.
+# It is not cleaned by 'make clean'
+
+PYTHON := python$(PYTHON_VERSION)
+PYTHON_VERSION ?= $(shell $(PYTHON) -c 'import sys; print ("%d.%d" % sys.version_info[:2])')
+BUILD_DIR = $(shell pwd)/../build/lib.$(PYTHON_VERSION)
+
+SPHINXBUILD ?= $$(pwd)/env/bin/sphinx-build
+SPHOPTS = SPHINXBUILD=$(SPHINXBUILD)
+
+html: package src/sqlstate_errors.rst
+ $(MAKE) $(SPHOPTS) -C src $@
+ cp -r src/_build/html .
+
+src/sqlstate_errors.rst: ../psycopg/sqlstate_errors.h $(BUILD_DIR)
+ env/bin/python src/tools/make_sqlstate_docs.py $< > $@
+
+$(BUILD_DIR):
+ $(MAKE) PYTHON=$(PYTHON) -C .. package
+
+doctest:
+ $(MAKE) PYTHON=$(PYTHON) -C .. package
+ $(MAKE) $(SPHOPTS) -C src $@
+
+clean:
+ $(MAKE) $(SPHOPTS) -C src $@
+ rm -rf html src/sqlstate_errors.rst
+
+env: requirements.txt
+ virtualenv -p $(PYTHON) env
+ ./env/bin/pip install -r requirements.txt
+ echo "$$(pwd)/../build/lib.$(PYTHON_VERSION)" \
+ > env/lib/python$(PYTHON_VERSION)/site-packages/psycopg.pth
diff --git a/doc/README.rst b/doc/README.rst
new file mode 100644
index 0000000..7c435b3
--- /dev/null
+++ b/doc/README.rst
@@ -0,0 +1,20 @@
+How to build psycopg documentation
+----------------------------------
+
+Building the documentation usually requires building the library too for
+introspection, so you will need the same prerequisites_. The only extra
+prerequisite is virtualenv_: the packages needed to build the docs will be
+installed when building the env.
+
+.. _prerequisites: https://www.psycopg.org/docs/install.html#install-from-source
+.. _virtualenv: https://virtualenv.pypa.io/en/latest/
+
+Build the env once with::
+
+ make env
+
+Then you can build the documentation with::
+
+ make
+
+You should find the rendered documentation in the ``html`` directory.
diff --git a/doc/SUCCESS b/doc/SUCCESS
new file mode 100644
index 0000000..de45991
--- /dev/null
+++ b/doc/SUCCESS
@@ -0,0 +1,114 @@
+From: Jack Moffitt <jack@xiph.org>
+To: Psycopg Mailing List <psycopg@lists.initd.org>
+Subject: Re: [Psycopg] preparing for 1.0
+Date: 22 Oct 2001 11:16:21 -0600
+
+www.vorbis.com is serving from 5-10k pages per day with psycopg serving
+data for most of that.
+
+I plan to use it for several of our other sites, so that number will
+increase.
+
+I've never had a single problem (that wasn't my fault) besides those
+segfaults, and those are now gone as well, and I've been using psycopg
+since June (around 0.99.2?).
+
+jack.
+
+
+From: Yury Don <gercon@vpcit.ru>
+To: Psycopg Mailing List <psycopg@lists.initd.org>
+Subject: Re: [Psycopg] preparing for 1.0
+Date: 23 Oct 2001 09:53:11 +0600
+
+We use psycopg and psycopg zope adapter since fisrt public
+release (it seems version 0.4). Now it works on 3 our sites and in intranet
+applications. We had few problems, but all problems were quickly
+solved. The strong side of psycopg is that it's code is well organized
+and easy to understand. When I found a problem with non-ISO datestyle in first
+version of psycopg, it took for me 15 or 20 minutes to learn code and
+to solve the problem, even thouth my knowledge of c were poor.
+
+BTW, segfault with dictfetchall on particular data set (see [Psycopg]
+dictfetchXXX() problems) disappeared in 0.99.8pre2.
+
+--
+Best regards,
+Yury Don
+
+
+From: Tom Jenkins <tjenkins@devis.com>
+To: Federico Di Gregorio <fog@debian.org>
+Cc: Psycopg Mailing List <psycopg@lists.initd.org>
+Subject: Re: [Psycopg] preparing for 1.0
+Date: 23 Oct 2001 08:25:52 -0400
+
+The US Govt Department of Labor's Office of Disability Employment
+Policy's DisabilityDirect website is run on zope and zpsycopg.
+
+
+From: Scott Leerssen <sleerssen@racemi.com>
+To: Federico Di Gregorio <fog@debian.org>
+Subject: Re: [Psycopg] preparing for 1.0
+Date: 23 Oct 2001 09:56:10 -0400
+
+Racemi's load management software infrastructure uses psycopg to handle
+complex server allocation decisions, plus storage and access of
+environmental conditions and accounting records for potentially
+thousands of servers. Psycopg has, to this point, been the only
+Python/PostGreSQL interface that could handle the scaling required for
+our multithreaded applications.
+
+Scott
+
+
+From: Andre Schubert <andre.schubert@geyer.kabeljournal.de>
+To: Federico Di Gregorio <fog@debian.org>
+Cc: Psycopg Mailing List <psycopg@lists.initd.org>
+Subject: Re: [Psycopg] preparing for 1.0
+Date: 23 Oct 2001 11:46:07 +0200
+
+i have changed the psycopg version to 0.99.8pre2 on all devel-machines
+and all segfaults are gone. after my holiday i wil change to 0.99.8pre2
+or 1.0 on our production-server.
+this server contains several web-sites which are all connected to
+postgres over ZPsycopgDA.
+
+thanks as
+
+
+From: Fred Wilson Horch <fhorch@ecoaccess.org>
+To: <psycopg@lists.initd.org>
+Subject: [Psycopg] Success story for psycopg
+Date: 23 Oct 2001 10:59:17 -0400
+
+Due to various quirks of PyGreSQL and PoPy, EcoAccess has been looking for
+a reliable, fast and relatively bug-free Python-PostgreSQL interface for
+our project.
+
+Binary support in psycopg, along with the umlimited tuple size in
+PostgreSQL 7.1, allowed us to quickly prototype a database-backed file
+storage web application, which we're using for file sharing among our
+staff and volunteers. Using a database backend instead of a file system
+allows us to easily enrich the meta-information associated with each file
+and simplifies our data handling routines.
+
+We've been impressed by the responsiveness of the psycopg team to bug
+reports and feature requests, and we're looking forward to using psycopg
+as the Python interface for additional database-backed web applications.
+
+Keep up the good work!
+--
+Fred Wilson Horch mailto:fhorch@ecoaccess.org
+Executive Director, EcoAccess http://ecoaccess.org/
+
+
+From: Damon Fasching <fasching@design.lbl.gov>
+To: Michele Comitini <mcm@glisco.it>
+Cc: fog@debian.org
+Subject: Re: How does one create a database within Python using psycopg?
+Date: 25 Feb 2002 17:39:41 -0800
+
+[snip]
+btw I checked out 4 different Python-PostgreSQL packages. psycopg is the
+only one which built and imported w/o any trouble! (At least for me.)
diff --git a/doc/pep-0249.txt b/doc/pep-0249.txt
new file mode 100644
index 0000000..e74fd0d
--- /dev/null
+++ b/doc/pep-0249.txt
@@ -0,0 +1,1005 @@
+PEP: 249
+Title: Python Database API Specification v2.0
+Version: $Revision: 1555 $
+Author: db-sig@python.org (Python Database SIG)
+Editor: mal@lemburg.com (Marc-Andre Lemburg)
+Status: Final
+Type: Informational
+Replaces: 248
+Release-Date: 07 Apr 1999
+
+Introduction
+
+ This API has been defined to encourage similarity between the
+ Python modules that are used to access databases. By doing this,
+ we hope to achieve a consistency leading to more easily understood
+ modules, code that is generally more portable across databases,
+ and a broader reach of database connectivity from Python.
+
+ The interface specification consists of several sections:
+
+ * Module Interface
+ * Connection Objects
+ * Cursor Objects
+ * DBI Helper Objects
+ * Type Objects and Constructors
+ * Implementation Hints
+ * Major Changes from 1.0 to 2.0
+
+ Comments and questions about this specification may be directed
+ to the SIG for Database Interfacing with Python
+ (db-sig@python.org).
+
+ For more information on database interfacing with Python and
+ available packages see the Database Topic
+ Guide at http://www.python.org/topics/database/.
+
+    This document describes the Python Database API Specification 2.0
+    and a set of common optional extensions. The previous version,
+    1.0, is still available as reference, in PEP 248. Package
+    writers are encouraged to use this version of the specification as
+    basis for new interfaces.
+
+Module Interface
+
+ Access to the database is made available through connection
+ objects. The module must provide the following constructor for
+ these:
+
+ connect(parameters...)
+
+ Constructor for creating a connection to the database.
+ Returns a Connection Object. It takes a number of
+ parameters which are database dependent. [1]
+
+ These module globals must be defined:
+
+ apilevel
+
+ String constant stating the supported DB API level.
+ Currently only the strings '1.0' and '2.0' are allowed.
+
+ If not given, a DB-API 1.0 level interface should be
+ assumed.
+
+ threadsafety
+
+ Integer constant stating the level of thread safety the
+ interface supports. Possible values are:
+
+ 0 Threads may not share the module.
+ 1 Threads may share the module, but not connections.
+ 2 Threads may share the module and connections.
+ 3 Threads may share the module, connections and
+ cursors.
+
+ Sharing in the above context means that two threads may
+ use a resource without wrapping it using a mutex semaphore
+ to implement resource locking. Note that you cannot always
+ make external resources thread safe by managing access
+ using a mutex: the resource may rely on global variables
+ or other external sources that are beyond your control.
+
+ paramstyle
+
+ String constant stating the type of parameter marker
+ formatting expected by the interface. Possible values are
+ [2]:
+
+ 'qmark' Question mark style,
+ e.g. '...WHERE name=?'
+ 'numeric' Numeric, positional style,
+ e.g. '...WHERE name=:1'
+ 'named' Named style,
+ e.g. '...WHERE name=:name'
+ 'format' ANSI C printf format codes,
+ e.g. '...WHERE name=%s'
+ 'pyformat' Python extended format codes,
+ e.g. '...WHERE name=%(name)s'
+
+ The module should make all error information available through
+ these exceptions or subclasses thereof:
+
+ Warning
+
+ Exception raised for important warnings like data
+ truncations while inserting, etc. It must be a subclass of
+ the Python StandardError (defined in the module
+ exceptions).
+
+ Error
+
+ Exception that is the base class of all other error
+ exceptions. You can use this to catch all errors with one
+ single 'except' statement. Warnings are not considered
+ errors and thus should not use this class as base. It must
+ be a subclass of the Python StandardError (defined in the
+ module exceptions).
+
+ InterfaceError
+
+ Exception raised for errors that are related to the
+ database interface rather than the database itself. It
+ must be a subclass of Error.
+
+ DatabaseError
+
+ Exception raised for errors that are related to the
+ database. It must be a subclass of Error.
+
+ DataError
+
+ Exception raised for errors that are due to problems with
+ the processed data like division by zero, numeric value
+ out of range, etc. It must be a subclass of DatabaseError.
+
+ OperationalError
+
+ Exception raised for errors that are related to the
+ database's operation and not necessarily under the control
+ of the programmer, e.g. an unexpected disconnect occurs,
+ the data source name is not found, a transaction could not
+ be processed, a memory allocation error occurred during
+ processing, etc. It must be a subclass of DatabaseError.
+
+ IntegrityError
+
+ Exception raised when the relational integrity of the
+ database is affected, e.g. a foreign key check fails. It
+ must be a subclass of DatabaseError.
+
+ InternalError
+
+ Exception raised when the database encounters an internal
+ error, e.g. the cursor is not valid anymore, the
+ transaction is out of sync, etc. It must be a subclass of
+ DatabaseError.
+
+ ProgrammingError
+
+ Exception raised for programming errors, e.g. table not
+ found or already exists, syntax error in the SQL
+ statement, wrong number of parameters specified, etc. It
+ must be a subclass of DatabaseError.
+
+ NotSupportedError
+
+ Exception raised in case a method or database API was used
+ which is not supported by the database, e.g. requesting a
+ .rollback() on a connection that does not support
+ transaction or has transactions turned off. It must be a
+ subclass of DatabaseError.
+
+ This is the exception inheritance layout:
+
+ StandardError
+ |__Warning
+ |__Error
+ |__InterfaceError
+ |__DatabaseError
+ |__DataError
+ |__OperationalError
+ |__IntegrityError
+ |__InternalError
+ |__ProgrammingError
+ |__NotSupportedError
+
+ Note: The values of these exceptions are not defined. They should
+ give the user a fairly good idea of what went wrong, though.
+
+
+Connection Objects
+
+ Connection Objects should respond to the following methods:
+
+ .close()
+
+ Close the connection now (rather than whenever __del__ is
+ called). The connection will be unusable from this point
+ forward; an Error (or subclass) exception will be raised
+ if any operation is attempted with the connection. The
+ same applies to all cursor objects trying to use the
+ connection. Note that closing a connection without
+ committing the changes first will cause an implicit
+ rollback to be performed.
+
+
+ .commit()
+
+ Commit any pending transaction to the database. Note that
+ if the database supports an auto-commit feature, this must
+ be initially off. An interface method may be provided to
+ turn it back on.
+
+ Database modules that do not support transactions should
+ implement this method with void functionality.
+
+ .rollback()
+
+ This method is optional since not all databases provide
+ transaction support. [3]
+
+        In case a database does provide transactions this method
+        causes the database to roll back to the start of any
+ pending transaction. Closing a connection without
+ committing the changes first will cause an implicit
+ rollback to be performed.
+
+ .cursor()
+
+ Return a new Cursor Object using the connection. If the
+ database does not provide a direct cursor concept, the
+ module will have to emulate cursors using other means to
+ the extent needed by this specification. [4]
+
+
+Cursor Objects
+
+ These objects represent a database cursor, which is used to
+ manage the context of a fetch operation. Cursors created from
+ the same connection are not isolated, i.e., any changes
+ done to the database by a cursor are immediately visible by the
+ other cursors. Cursors created from different connections can
+ or can not be isolated, depending on how the transaction support
+ is implemented (see also the connection's rollback() and commit()
+ methods.)
+
+ Cursor Objects should respond to the following methods and
+ attributes:
+
+ .description
+
+ This read-only attribute is a sequence of 7-item
+ sequences. Each of these sequences contains information
+ describing one result column: (name, type_code,
+ display_size, internal_size, precision, scale,
+ null_ok). The first two items (name and type_code) are
+ mandatory, the other five are optional and must be set to
+ None if meaningful values are not provided.
+
+ This attribute will be None for operations that
+ do not return rows or if the cursor has not had an
+ operation invoked via the executeXXX() method yet.
+
+ The type_code can be interpreted by comparing it to the
+ Type Objects specified in the section below.
+
+ .rowcount
+
+ This read-only attribute specifies the number of rows that
+ the last executeXXX() produced (for DQL statements like
+ 'select') or affected (for DML statements like 'update' or
+ 'insert').
+
+ The attribute is -1 in case no executeXXX() has been
+ performed on the cursor or the rowcount of the last
+ operation is not determinable by the interface. [7]
+
+ Note: Future versions of the DB API specification could
+ redefine the latter case to have the object return None
+ instead of -1.
+
+ .callproc(procname[,parameters])
+
+ (This method is optional since not all databases provide
+ stored procedures. [3])
+
+ Call a stored database procedure with the given name. The
+ sequence of parameters must contain one entry for each
+ argument that the procedure expects. The result of the
+ call is returned as modified copy of the input
+ sequence. Input parameters are left untouched, output and
+ input/output parameters replaced with possibly new values.
+
+ The procedure may also provide a result set as
+ output. This must then be made available through the
+ standard fetchXXX() methods.
+
+ .close()
+
+ Close the cursor now (rather than whenever __del__ is
+ called). The cursor will be unusable from this point
+ forward; an Error (or subclass) exception will be raised
+ if any operation is attempted with the cursor.
+
+ .execute(operation[,parameters])
+
+ Prepare and execute a database operation (query or
+ command). Parameters may be provided as sequence or
+ mapping and will be bound to variables in the operation.
+ Variables are specified in a database-specific notation
+ (see the module's paramstyle attribute for details). [5]
+
+ A reference to the operation will be retained by the
+ cursor. If the same operation object is passed in again,
+ then the cursor can optimize its behavior. This is most
+ effective for algorithms where the same operation is used,
+ but different parameters are bound to it (many times).
+
+ For maximum efficiency when reusing an operation, it is
+ best to use the setinputsizes() method to specify the
+ parameter types and sizes ahead of time. It is legal for
+ a parameter to not match the predefined information; the
+ implementation should compensate, possibly with a loss of
+ efficiency.
+
+        The parameters may also be specified as list of tuples to
+        e.g. insert multiple rows in a single operation, but this
+        kind of usage is deprecated: executemany() should be used
+        instead.
+
+ Return values are not defined.
+
+ .executemany(operation,seq_of_parameters)
+
+ Prepare a database operation (query or command) and then
+ execute it against all parameter sequences or mappings
+ found in the sequence seq_of_parameters.
+
+ Modules are free to implement this method using multiple
+ calls to the execute() method or by using array operations
+ to have the database process the sequence as a whole in
+ one call.
+
+ Use of this method for an operation which produces one or
+ more result sets constitutes undefined behavior, and the
+ implementation is permitted (but not required) to raise
+ an exception when it detects that a result set has been
+ created by an invocation of the operation.
+
+ The same comments as for execute() also apply accordingly
+ to this method.
+
+ Return values are not defined.
+
+ .fetchone()
+
+ Fetch the next row of a query result set, returning a
+ single sequence, or None when no more data is
+ available. [6]
+
+ An Error (or subclass) exception is raised if the previous
+ call to executeXXX() did not produce any result set or no
+ call was issued yet.
+
+ fetchmany([size=cursor.arraysize])
+
+ Fetch the next set of rows of a query result, returning a
+ sequence of sequences (e.g. a list of tuples). An empty
+ sequence is returned when no more rows are available.
+
+ The number of rows to fetch per call is specified by the
+ parameter. If it is not given, the cursor's arraysize
+ determines the number of rows to be fetched. The method
+ should try to fetch as many rows as indicated by the size
+ parameter. If this is not possible due to the specified
+ number of rows not being available, fewer rows may be
+ returned.
+
+ An Error (or subclass) exception is raised if the previous
+ call to executeXXX() did not produce any result set or no
+ call was issued yet.
+
+ Note there are performance considerations involved with
+ the size parameter. For optimal performance, it is
+ usually best to use the arraysize attribute. If the size
+ parameter is used, then it is best for it to retain the
+ same value from one fetchmany() call to the next.
+
+ .fetchall()
+
+ Fetch all (remaining) rows of a query result, returning
+ them as a sequence of sequences (e.g. a list of tuples).
+ Note that the cursor's arraysize attribute can affect the
+ performance of this operation.
+
+ An Error (or subclass) exception is raised if the previous
+ call to executeXXX() did not produce any result set or no
+ call was issued yet.
+
+ .nextset()
+
+ (This method is optional since not all databases support
+ multiple result sets. [3])
+
+ This method will make the cursor skip to the next
+ available set, discarding any remaining rows from the
+ current set.
+
+ If there are no more sets, the method returns
+ None. Otherwise, it returns a true value and subsequent
+ calls to the fetch methods will return rows from the next
+ result set.
+
+ An Error (or subclass) exception is raised if the previous
+ call to executeXXX() did not produce any result set or no
+ call was issued yet.
+
+ .arraysize
+
+ This read/write attribute specifies the number of rows to
+ fetch at a time with fetchmany(). It defaults to 1 meaning
+ to fetch a single row at a time.
+
+ Implementations must observe this value with respect to
+ the fetchmany() method, but are free to interact with the
+ database a single row at a time. It may also be used in
+ the implementation of executemany().
+
+ .setinputsizes(sizes)
+
+ This can be used before a call to executeXXX() to
+ predefine memory areas for the operation's parameters.
+
+ sizes is specified as a sequence -- one item for each
+ input parameter. The item should be a Type Object that
+ corresponds to the input that will be used, or it should
+ be an integer specifying the maximum length of a string
+ parameter. If the item is None, then no predefined memory
+ area will be reserved for that column (this is useful to
+ avoid predefined areas for large inputs).
+
+ This method would be used before the executeXXX() method
+ is invoked.
+
+ Implementations are free to have this method do nothing
+ and users are free to not use it.
+
+ .setoutputsize(size[,column])
+
+ Set a column buffer size for fetches of large columns
+ (e.g. LONGs, BLOBs, etc.). The column is specified as an
+ index into the result sequence. Not specifying the column
+ will set the default size for all large columns in the
+ cursor.
+
+ This method would be used before the executeXXX() method
+ is invoked.
+
+ Implementations are free to have this method do nothing
+ and users are free to not use it.
+
+
+Type Objects and Constructors
+
+ Many databases need to have the input in a particular format for
+ binding to an operation's input parameters. For example, if an
+ input is destined for a DATE column, then it must be bound to the
+ database in a particular string format. Similar problems exist
+ for "Row ID" columns or large binary items (e.g. blobs or RAW
+ columns). This presents problems for Python since the parameters
+ to the executeXXX() method are untyped. When the database module
+ sees a Python string object, it doesn't know if it should be bound
+ as a simple CHAR column, as a raw BINARY item, or as a DATE.
+
+ To overcome this problem, a module must provide the constructors
+ defined below to create objects that can hold special values.
+ When passed to the cursor methods, the module can then detect the
+ proper type of the input parameter and bind it accordingly.
+
+ A Cursor Object's description attribute returns information about
+ each of the result columns of a query. The type_code must compare
+ equal to one of Type Objects defined below. Type Objects may be
+ equal to more than one type code (e.g. DATETIME could be equal to
+ the type codes for date, time and timestamp columns; see the
+ Implementation Hints below for details).
+
+ The module exports the following constructors and singletons:
+
+ Date(year,month,day)
+
+ This function constructs an object holding a date value.
+
+ Time(hour,minute,second)
+
+ This function constructs an object holding a time value.
+
+ Timestamp(year,month,day,hour,minute,second)
+
+ This function constructs an object holding a time stamp
+ value.
+
+ DateFromTicks(ticks)
+
+ This function constructs an object holding a date value
+ from the given ticks value (number of seconds since the
+ epoch; see the documentation of the standard Python time
+ module for details).
+
+ TimeFromTicks(ticks)
+
+ This function constructs an object holding a time value
+ from the given ticks value (number of seconds since the
+ epoch; see the documentation of the standard Python time
+ module for details).
+
+ TimestampFromTicks(ticks)
+
+ This function constructs an object holding a time stamp
+ value from the given ticks value (number of seconds since
+ the epoch; see the documentation of the standard Python
+ time module for details).
+
+ Binary(string)
+
+ This function constructs an object capable of holding a
+ binary (long) string value.
+
+
+ STRING
+
+ This type object is used to describe columns in a database
+ that are string-based (e.g. CHAR).
+
+ BINARY
+
+ This type object is used to describe (long) binary columns
+ in a database (e.g. LONG, RAW, BLOBs).
+
+ NUMBER
+
+ This type object is used to describe numeric columns in a
+ database.
+
+ DATETIME
+
+ This type object is used to describe date/time columns in
+ a database.
+
+ ROWID
+
+ This type object is used to describe the "Row ID" column
+ in a database.
+
+ SQL NULL values are represented by the Python None singleton on
+ input and output.
+
+ Note: Usage of Unix ticks for database interfacing can cause
+ troubles because of the limited date range they cover.
+
+
+Implementation Hints for Module Authors
+
+ * The preferred object types for the date/time objects are those
+ defined in the mxDateTime package. It provides all necessary
+ constructors and methods both at Python and C level.
+
+ * The preferred object type for Binary objects are the
+ buffer types available in standard Python starting with
+ version 1.5.2. Please see the Python documentation for
+      details. For information about the C interface have a
+ look at Include/bufferobject.h and
+ Objects/bufferobject.c in the Python source
+ distribution.
+
+ * Starting with Python 2.3, module authors can also use the object
+ types defined in the standard datetime module for date/time
+ processing. However, it should be noted that this does not
+ expose a C API like mxDateTime does which means that integration
+ with C based database modules is more difficult.
+
+ * Here is a sample implementation of the Unix ticks based
+ constructors for date/time delegating work to the generic
+ constructors:
+
+ import time
+
+ def DateFromTicks(ticks):
+ return apply(Date,time.localtime(ticks)[:3])
+
+ def TimeFromTicks(ticks):
+ return apply(Time,time.localtime(ticks)[3:6])
+
+ def TimestampFromTicks(ticks):
+ return apply(Timestamp,time.localtime(ticks)[:6])
+
+    * This Python class allows implementing the above type
+      objects even though the description type code field yields
+      multiple values for one type object:
+
+ class DBAPITypeObject:
+ def __init__(self,*values):
+ self.values = values
+ def __cmp__(self,other):
+ if other in self.values:
+ return 0
+ if other < self.values:
+ return 1
+ else:
+ return -1
+
+ The resulting type object compares equal to all values
+ passed to the constructor.
+
+ * Here is a snippet of Python code that implements the exception
+ hierarchy defined above:
+
+ import exceptions
+
+ class Error(exceptions.StandardError):
+ pass
+
+ class Warning(exceptions.StandardError):
+ pass
+
+ class InterfaceError(Error):
+ pass
+
+ class DatabaseError(Error):
+ pass
+
+ class InternalError(DatabaseError):
+ pass
+
+ class OperationalError(DatabaseError):
+ pass
+
+ class ProgrammingError(DatabaseError):
+ pass
+
+ class IntegrityError(DatabaseError):
+ pass
+
+ class DataError(DatabaseError):
+ pass
+
+ class NotSupportedError(DatabaseError):
+ pass
+
+ In C you can use the PyErr_NewException(fullname,
+ base, NULL) API to create the exception objects.
+
+
+Optional DB API Extensions
+
+ During the lifetime of DB API 2.0, module authors have often
+ extended their implementations beyond what is required by this DB
+ API specification. To enhance compatibility and to provide a clean
+ upgrade path to possible future versions of the specification,
+ this section defines a set of common extensions to the core DB API
+ 2.0 specification.
+
+ As with all DB API optional features, the database module authors
+ are free to not implement these additional attributes and methods
+ (using them will then result in an AttributeError) or to raise a
+ NotSupportedError in case the availability can only be checked at
+ run-time.
+
+ It has been proposed to make usage of these extensions optionally
+ visible to the programmer by issuing Python warnings through the
+ Python warning framework. To make this feature useful, the warning
+ messages must be standardized in order to be able to mask them. These
+ standard messages are referred to below as "Warning Message".
+
+ Cursor Attribute .rownumber
+
+ This read-only attribute should provide the current 0-based
+ index of the cursor in the result set or None if the index cannot
+ be determined.
+
+ The index can be seen as index of the cursor in a sequence (the
+ result set). The next fetch operation will fetch the row
+ indexed by .rownumber in that sequence.
+
+ Warning Message: "DB-API extension cursor.rownumber used"
+
+ Connection Attributes .Error, .ProgrammingError, etc.
+
+        All exception classes defined by the DB API standard should be
+        exposed on the Connection objects as attributes (in addition
+        to being available at module scope).
+
+ These attributes simplify error handling in multi-connection
+ environments.
+
+ Warning Message: "DB-API extension connection.<exception> used"
+
+ Cursor Attributes .connection
+
+ This read-only attribute return a reference to the Connection
+ object on which the cursor was created.
+
+ The attribute simplifies writing polymorph code in
+ multi-connection environments.
+
+ Warning Message: "DB-API extension cursor.connection used"
+
+ Cursor Method .scroll(value[,mode='relative'])
+
+ Scroll the cursor in the result set to a new position according
+ to mode.
+
+ If mode is 'relative' (default), value is taken as offset to
+ the current position in the result set, if set to 'absolute',
+ value states an absolute target position.
+
+ An IndexError should be raised in case a scroll operation would
+ leave the result set. In this case, the cursor position is left
+ undefined (ideal would be to not move the cursor at all).
+
+        Note: This method should use native scrollable cursors, if
+        available, or revert to an emulation for forward-only
+ scrollable cursors. The method may raise NotSupportedErrors to
+ signal that a specific operation is not supported by the
+ database (e.g. backward scrolling).
+
+ Warning Message: "DB-API extension cursor.scroll() used"
+
+ Cursor Attribute .messages
+
+ This is a Python list object to which the interface appends
+ tuples (exception class, exception value) for all messages
+ which the interfaces receives from the underlying database for
+ this cursor.
+
+ The list is cleared by all standard cursor methods calls (prior
+ to executing the call) except for the .fetchXXX() calls
+ automatically to avoid excessive memory usage and can also be
+ cleared by executing "del cursor.messages[:]".
+
+ All error and warning messages generated by the database are
+ placed into this list, so checking the list allows the user to
+ verify correct operation of the method calls.
+
+ The aim of this attribute is to eliminate the need for a
+ Warning exception which often causes problems (some warnings
+ really only have informational character).
+
+ Warning Message: "DB-API extension cursor.messages used"
+
+ Connection Attribute .messages
+
+ Same as cursor.messages except that the messages in the list
+ are connection oriented.
+
+ The list is cleared automatically by all standard connection
+ methods calls (prior to executing the call) to avoid excessive
+ memory usage and can also be cleared by executing "del
+ connection.messages[:]".
+
+ Warning Message: "DB-API extension connection.messages used"
+
+ Cursor Method .next()
+
+ Return the next row from the currently executing SQL statement
+ using the same semantics as .fetchone(). A StopIteration
+ exception is raised when the result set is exhausted for Python
+ versions 2.2 and later. Previous versions don't have the
+ StopIteration exception and so the method should raise an
+ IndexError instead.
+
+ Warning Message: "DB-API extension cursor.next() used"
+
+ Cursor Method .__iter__()
+
+ Return self to make cursors compatible to the iteration protocol.
+
+ Warning Message: "DB-API extension cursor.__iter__() used"
+
+ Cursor Attribute .lastrowid
+
+ This read-only attribute provides the rowid of the last
+ modified row (most databases return a rowid only when a single
+ INSERT operation is performed). If the operation does not set
+ a rowid or if the database does not support rowids, this
+ attribute should be set to None.
+
+ The semantics of .lastrowid are undefined in case the last
+ executed statement modified more than one row, e.g. when
+ using INSERT with .executemany().
+
+ Warning Message: "DB-API extension cursor.lastrowid used"
+
+
+Optional Error Handling Extension
+
+ The core DB API specification only introduces a set of exceptions
+ which can be raised to report errors to the user. In some cases,
+ exceptions may be too disruptive for the flow of a program or even
+ render execution impossible.
+
+ For these cases and in order to simplify error handling when
+ dealing with databases, database module authors may choose to
+ implement user defineable error handlers. This section describes a
+ standard way of defining these error handlers.
+
+ Cursor/Connection Attribute .errorhandler
+
+ Read/write attribute which references an error handler to call
+ in case an error condition is met.
+
+ The handler must be a Python callable taking the following
+ arguments: errorhandler(connection, cursor, errorclass,
+ errorvalue) where connection is a reference to the connection
+ on which the cursor operates, cursor a reference to the cursor
+ (or None in case the error does not apply to a cursor),
+ errorclass is an error class which to instantiate using
+ errorvalue as construction argument.
+
+ The standard error handler should add the error information to
+ the appropriate .messages attribute (connection.messages or
+ cursor.messages) and raise the exception defined by the given
+ errorclass and errorvalue parameters.
+
+ If no errorhandler is set (the attribute is None), the standard
+ error handling scheme as outlined above, should be applied.
+
+ Warning Message: "DB-API extension .errorhandler used"
+
+ Cursors should inherit the .errorhandler setting from their
+ connection objects at cursor creation time.
+
+
+Frequently Asked Questions
+
+ The database SIG often sees reoccurring questions about the DB API
+ specification. This section covers some of the issues people
+ sometimes have with the specification.
+
+ Question:
+
+ How can I construct a dictionary out of the tuples returned by
+ .fetchxxx():
+
+ Answer:
+
+ There are several existing tools available which provide
+ helpers for this task. Most of them use the approach of using
+ the column names defined in the cursor attribute .description
+ as basis for the keys in the row dictionary.
+
+ Note that the reason for not extending the DB API specification
+ to also support dictionary return values for the .fetchxxx()
+ methods is that this approach has several drawbacks:
+
+ * Some databases don't support case-sensitive column names or
+ auto-convert them to all lowercase or all uppercase
+ characters.
+
+ * Columns in the result set which are generated by the query
+ (e.g. using SQL functions) don't map to table column names
+ and databases usually generate names for these columns in a
+ very database specific way.
+
+ As a result, accessing the columns through dictionary keys
+ varies between databases and makes writing portable code
+ impossible.
+
+
+Major Changes from Version 1.0 to Version 2.0
+
+ The Python Database API 2.0 introduces a few major changes
+ compared to the 1.0 version. Because some of these changes will
+ cause existing DB API 1.0 based scripts to break, the major
+ version number was adjusted to reflect this change.
+
+ These are the most important changes from 1.0 to 2.0:
+
+ * The need for a separate dbi module was dropped and the
+ functionality merged into the module interface itself.
+
+ * New constructors and Type Objects were added for date/time
+ values, the RAW Type Object was renamed to BINARY. The
+ resulting set should cover all basic data types commonly
+ found in modern SQL databases.
+
+ * New constants (apilevel, threadlevel, paramstyle) and
+ methods (executemany, nextset) were added to provide better
+ database bindings.
+
+ * The semantics of .callproc() needed to call stored
+ procedures are now clearly defined.
+
+ * The definition of the .execute() return value changed.
+ Previously, the return value was based on the SQL statement
+ type (which was hard to implement right) -- it is undefined
+ now; use the more flexible .rowcount attribute
+ instead. Modules are free to return the old style return
+ values, but these are no longer mandated by the
+ specification and should be considered database interface
+ dependent.
+
+ * Class based exceptions were incorporated into the
+ specification. Module implementors are free to extend the
+ exception layout defined in this specification by
+ subclassing the defined exception classes.
+
+ Post-publishing additions to the DB API 2.0 specification:
+
+ * Additional optional DB API extensions to the set of
+ core functionality were specified.
+
+
+Open Issues
+
+ Although the version 2.0 specification clarifies a lot of
+ questions that were left open in the 1.0 version, there are still
+ some remaining issues which should be addressed in future
+ versions:
+
+ * Define a useful return value for .nextset() for the case where
+ a new result set is available.
+
+ * Create a fixed point numeric type for use as loss-less
+ monetary and decimal interchange format.
+
+
+Footnotes
+
+ [1] As a guideline the connection constructor parameters should be
+ implemented as keyword parameters for more intuitive use and
+ follow this order of parameters:
+
+ dsn Data source name as string
+ user User name as string (optional)
+ password Password as string (optional)
+ host Hostname (optional)
+ database Database name (optional)
+
+ E.g. a connect could look like this:
+
+ connect(dsn='myhost:MYDB',user='guido',password='234$')
+
+ [2] Module implementors should prefer 'numeric', 'named' or
+ 'pyformat' over the other formats because these offer more
+ clarity and flexibility.
+
+ [3] If the database does not support the functionality required
+ by the method, the interface should throw an exception in
+ case the method is used.
+
+ The preferred approach is to not implement the method and
+ thus have Python generate an AttributeError in
+ case the method is requested. This allows the programmer to
+ check for database capabilities using the standard
+ hasattr() function.
+
+ For some dynamically configured interfaces it may not be
+ appropriate to require dynamically making the method
+ available. These interfaces should then raise a
+ NotSupportedError to indicate the non-ability
+ to perform the roll back when the method is invoked.
+
+ [4] a database interface may choose to support named cursors by
+ allowing a string argument to the method. This feature is
+ not part of the specification, since it complicates
+ semantics of the .fetchXXX() methods.
+
+ [5] The module will use the __getitem__ method of the parameters
+ object to map either positions (integers) or names (strings)
+ to parameter values. This allows for both sequences and
+ mappings to be used as input.
+
+ The term "bound" refers to the process of binding an input
+ value to a database execution buffer. In practical terms,
+ this means that the input value is directly used as a value
+ in the operation. The client should not be required to
+ "escape" the value so that it can be used -- the value
+ should be equal to the actual database value.
+
+ [6] Note that the interface may implement row fetching using
+ arrays and other optimizations. It is not
+ guaranteed that a call to this method will only move the
+ associated cursor forward by one row.
+
+ [7] The rowcount attribute may be coded in a way that updates
+ its value dynamically. This can be useful for databases that
+ return usable rowcount values only after the first call to
+ a .fetchXXX() method.
+
+Acknowledgements
+
+ Many thanks go to Andrew Kuchling who converted the Python
+ Database API Specification 2.0 from the original HTML format into
+ the PEP format.
+
+Copyright
+
+ This document has been placed in the Public Domain.
+
+
+
+Local Variables:
+mode: indented-text
+indent-tabs-mode: nil
+End:
diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 0000000..9449985
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1,8 @@
+# Packages only needed to build the docs
+Pygments>=2.2,<2.3
+Sphinx>=1.6,<=1.7
+sphinx-better-theme>=0.1.5,<0.2
+
+# 0.15.2 affected by https://sourceforge.net/p/docutils/bugs/353/
+# Can update to 0.16 after release (currently in rc) but must update Sphinx too
+docutils<0.15
diff --git a/doc/src/Makefile b/doc/src/Makefile
new file mode 100644
index 0000000..53d0680
--- /dev/null
+++ b/doc/src/Makefile
@@ -0,0 +1,99 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# DSN for the doctest database
+PSYCOPG2_DSN="user=postgres dbname=test"
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+ -rm -rf ./html/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text pages are in $(BUILDDIR)/text."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/psycopg.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/psycopg.qhc"
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+ "run these through (pdf)latex."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ PSYCOPG2_DSN=$(PSYCOPG2_DSN) \
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/src/_static/psycopg.css b/doc/src/_static/psycopg.css
new file mode 100644
index 0000000..f7ff756
--- /dev/null
+++ b/doc/src/_static/psycopg.css
@@ -0,0 +1,136 @@
+blockquote {
+ font-style: italic;
+}
+
+div.admonition-todo {
+ background-color: #ffa;
+ border: 1px solid #ee2;
+}
+
+div.dbapi-extension {
+ background-color: #eef;
+ border: 1px solid #aaf;
+}
+
+code.sql,
+tt.sql {
+ font-size: 1em;
+ background-color: transparent;
+}
+
+a > code.sql,
+a > tt.sql {
+ font-weight: normal;
+}
+
+a > code.sql:hover,
+a > tt.sql:hover {
+ text-decoration: underline;
+}
+
+dl.faq dt {
+ font-weight: bold;
+}
+
+table.data-types div.line-block {
+ margin-bottom: 0;
+}
+
+
+/* better theme customisation */
+
+body {
+ background-color: #216464;
+}
+
+header, .related, .document, footer {
+ background-color: white;
+}
+
+header h1 {
+ font-size: 150%;
+ margin-bottom: 0;
+ padding: 0.5rem 10px 0.5rem 10px;
+}
+
+h1, h2, h3 {
+ font-weight: normal;
+}
+
+.body h1, .body h2, .body h3 {
+ color: #074848;
+}
+
+h1 {
+ font-size: 200%;
+}
+
+h2 {
+ font-size: 160%;
+}
+
+h3 {
+ font-size: 140%;
+}
+
+footer#pagefooter {
+ margin-bottom: 1rem;
+ font-size: 85%;
+ color: #444;
+}
+
+#rellinks, #breadcrumbs {
+ padding-right: 10px;
+ padding-left: 10px;
+}
+
+.sphinxsidebar {
+ padding-left: 10px;
+}
+
+.bodywrapper {
+ padding-right: 10px;
+}
+
+div.body h1, div.body h2, div.body h3 {
+ background-color: #f2f2f2;
+ border-bottom: 1px solid #d0d0d0;
+}
+
+div.body p.rubric {
+ border-bottom: 1px solid #d0d0d0;
+}
+
+body .sphinxsidebar .search {
+ margin-top: 0;
+}
+
+html pre {
+ background-color: #efc;
+ border: 1px solid #ac9;
+ border-left: none;
+ border-right: none;
+}
+
+a, a:visited {
+ color: #0b6868;
+}
+
+th {
+ background-color: #ede;
+}
+
+code.xref, a code {
+ font-weight: bold;
+}
+
+code.descname {
+ font-weight: bold;
+ font-size: 120%;
+}
+
+@media (max-width: 820px) {
+ body {
+ background-color: white;
+ }
+}
diff --git a/doc/src/advanced.rst b/doc/src/advanced.rst
new file mode 100644
index 0000000..28c4be9
--- /dev/null
+++ b/doc/src/advanced.rst
@@ -0,0 +1,599 @@
+More advanced topics
+====================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+.. testsetup:: *
+
+ import re
+ import select
+
+ cur.execute("CREATE TABLE atable (apoint point)")
+ conn.commit()
+
+ def wait(conn):
+ while True:
+ state = conn.poll()
+ if state == psycopg2.extensions.POLL_OK:
+ break
+ elif state == psycopg2.extensions.POLL_WRITE:
+ select.select([], [conn.fileno()], [])
+ elif state == psycopg2.extensions.POLL_READ:
+ select.select([conn.fileno()], [], [])
+ else:
+ raise psycopg2.OperationalError("poll() returned %s" % state)
+
+ aconn = psycopg2.connect(database='test', async=1)
+ wait(aconn)
+ acurs = aconn.cursor()
+
+
+.. index::
+ double: Subclassing; Cursor
+ double: Subclassing; Connection
+
+.. _subclassing-connection:
+.. _subclassing-cursor:
+
+Connection and cursor factories
+-------------------------------
+
+Psycopg exposes two new-style classes that can be sub-classed and expanded to
+adapt them to the needs of the programmer: `psycopg2.extensions.cursor`
+and `psycopg2.extensions.connection`. The `connection` class is
+usually sub-classed only to provide an easy way to create customized cursors
+but other uses are possible. `cursor` is much more interesting, because
+it is the class where query building, execution and result type-casting into
+Python variables happens.
+
+The `~psycopg2.extras` module contains several examples of :ref:`connection
+and cursor subclasses <cursor-subclasses>`.
+
+.. note::
+
+ If you only need a customized cursor class, since Psycopg 2.5 you can use
+ the `~connection.cursor_factory` parameter of a regular connection instead
+ of creating a new `!connection` subclass.
+
+
+.. index::
+ single: Example; Cursor subclass
+
+An example of cursor subclass performing logging is::
+
+ import psycopg2
+ import psycopg2.extensions
+ import logging
+
+ class LoggingCursor(psycopg2.extensions.cursor):
+ def execute(self, sql, args=None):
+ logger = logging.getLogger('sql_debug')
+ logger.info(self.mogrify(sql, args))
+
+ try:
+ psycopg2.extensions.cursor.execute(self, sql, args)
+ except Exception, exc:
+ logger.error("%s: %s" % (exc.__class__.__name__, exc))
+ raise
+
+ conn = psycopg2.connect(DSN)
+ cur = conn.cursor(cursor_factory=LoggingCursor)
+ cur.execute("INSERT INTO mytable VALUES (%s, %s, %s);",
+ (10, 20, 30))
+
+
+
+.. index::
+ single: Objects; Creating new adapters
+ single: Adaptation; Creating new adapters
+ single: Data types; Creating new adapters
+
+.. _adapting-new-types:
+
+Adapting new Python types to SQL syntax
+---------------------------------------
+
+Any Python class or type can be adapted to an SQL string. Adaptation mechanism
+is similar to the Object Adaptation proposed in the :pep:`246` and is exposed
+by the `psycopg2.extensions.adapt()` function.
+
+The `~cursor.execute()` method adapts its arguments to the
+`~psycopg2.extensions.ISQLQuote` protocol. Objects that conform to this
+protocol expose a `!getquoted()` method returning the SQL representation
+of the object as a string (the method must return `!bytes` in Python 3).
+Optionally the conforming object may expose a
+`~psycopg2.extensions.ISQLQuote.prepare()` method.
+
+There are two basic ways to have a Python object adapted to SQL:
+
+- the object itself conforms, or knows how to make itself conform. Such
+ object must expose a `__conform__()` method that will be called with the
+ protocol object as argument. The object can check that the protocol is
+ `!ISQLQuote`, in which case it can return `!self` (if the object also
+ implements `!getquoted()`) or a suitable wrapper object. This option is
+ viable if you are the author of the object and if the object is specifically
+ designed for the database (i.e. having Psycopg as a dependency and polluting
+ its interface with the required methods doesn't bother you). For a simple
+ example you can take a look at the source code for the
+ `psycopg2.extras.Inet` object.
+
+- If implementing the `!ISQLQuote` interface directly in the object is not an
+ option (maybe because the object to adapt comes from a third party library),
+ you can use an *adaptation function*, taking the object to be adapted as
+ argument and returning a conforming object. The adapter must be
+ registered via the `~psycopg2.extensions.register_adapter()` function. A
+ simple example wrapper is `!psycopg2.extras.UUID_adapter` used by the
+ `~psycopg2.extras.register_uuid()` function.
+
+A convenient object to write adapters is the `~psycopg2.extensions.AsIs`
+wrapper, whose `!getquoted()` result is simply the `!str()`\ ing conversion of
+the wrapped object.
+
+.. index::
+ single: Example; Types adaptation
+
+Example: mapping of a `!Point` class into the |point|_ PostgreSQL
+geometric type:
+
+.. doctest::
+
+ >>> from psycopg2.extensions import adapt, register_adapter, AsIs
+
+ >>> class Point(object):
+ ... def __init__(self, x, y):
+ ... self.x = x
+ ... self.y = y
+
+ >>> def adapt_point(point):
+ ... x = adapt(point.x).getquoted()
+ ... y = adapt(point.y).getquoted()
+ ... return AsIs("'(%s, %s)'" % (x, y))
+
+ >>> register_adapter(Point, adapt_point)
+
+ >>> cur.execute("INSERT INTO atable (apoint) VALUES (%s)",
+ ... (Point(1.23, 4.56),))
+
+
+.. |point| replace:: :sql:`point`
+.. _point: https://www.postgresql.org/docs/current/static/datatype-geometric.html#DATATYPE-GEOMETRIC
+
+The above function call results in the SQL command::
+
+ INSERT INTO atable (apoint) VALUES ('(1.23, 4.56)');
+
+
+
+.. index:: Type casting
+
+.. _type-casting-from-sql-to-python:
+
+Type casting of SQL types into Python objects
+---------------------------------------------
+
+PostgreSQL objects read from the database can be adapted to Python objects
+through a user-defined adapting function. An adapter function takes two
+arguments: the object string representation as returned by PostgreSQL and the
+cursor currently being read, and should return a new Python object. For
+example, the following function parses the PostgreSQL :sql:`point`
+representation into the previously defined `!Point` class:
+
+ >>> def cast_point(value, cur):
+ ... if value is None:
+ ... return None
+ ...
+ ... # Convert from (f1, f2) syntax using a regular expression.
+ ... m = re.match(r"\(([^)]+),([^)]+)\)", value)
+ ... if m:
+ ... return Point(float(m.group(1)), float(m.group(2)))
+ ... else:
+ ... raise InterfaceError("bad point representation: %r" % value)
+
+
+In order to create a mapping from a PostgreSQL type (either standard or
+user-defined), its OID must be known. It can be retrieved either by the second
+column of the `cursor.description`:
+
+ >>> cur.execute("SELECT NULL::point")
+ >>> point_oid = cur.description[0][1]
+ >>> point_oid
+ 600
+
+or by querying the system catalog for the type name and namespace (the
+namespace for system objects is :sql:`pg_catalog`):
+
+ >>> cur.execute("""
+ ... SELECT pg_type.oid
+ ... FROM pg_type JOIN pg_namespace
+ ... ON typnamespace = pg_namespace.oid
+ ... WHERE typname = %(typename)s
+ ... AND nspname = %(namespace)s""",
+ ... {'typename': 'point', 'namespace': 'pg_catalog'})
+ >>> point_oid = cur.fetchone()[0]
+ >>> point_oid
+ 600
+
+After you know the object OID, you can create and register the new type:
+
+ >>> POINT = psycopg2.extensions.new_type((point_oid,), "POINT", cast_point)
+ >>> psycopg2.extensions.register_type(POINT)
+
+The `~psycopg2.extensions.new_type()` function binds the object OIDs
+(more than one can be specified) to the adapter function.
+`~psycopg2.extensions.register_type()` completes the spell. Conversion
+is automatically performed when a column whose type is a registered OID is
+read:
+
+ >>> cur.execute("SELECT '(10.2,20.3)'::point")
+ >>> point = cur.fetchone()[0]
+ >>> print type(point), point.x, point.y
+ <class 'Point'> 10.2 20.3
+
+A typecaster created by `!new_type()` can be also used with
+`~psycopg2.extensions.new_array_type()` to create a typecaster converting a
+PostgreSQL array into a Python list.
+
+
+.. index::
+ pair: Asynchronous; Notifications
+ pair: LISTEN; SQL command
+ pair: NOTIFY; SQL command
+
+.. _async-notify:
+
+Asynchronous notifications
+--------------------------
+
+Psycopg allows asynchronous interaction with other database sessions using the
+facilities offered by PostgreSQL commands |LISTEN|_ and |NOTIFY|_. Please
+refer to the PostgreSQL documentation for examples about how to use this form of
+communication.
+
+Notifications are instances of the `~psycopg2.extensions.Notify` object made
+available upon reception in the `connection.notifies` list. Notifications can
+be sent from Python code simply executing a :sql:`NOTIFY` command in an
+`~cursor.execute()` call.
+
+Because of the way sessions interact with notifications (see |NOTIFY|_
+documentation), you should keep the connection in `~connection.autocommit`
+mode if you wish to receive or send notifications in a timely manner.
+
+.. |LISTEN| replace:: :sql:`LISTEN`
+.. _LISTEN: https://www.postgresql.org/docs/current/static/sql-listen.html
+.. |NOTIFY| replace:: :sql:`NOTIFY`
+.. _NOTIFY: https://www.postgresql.org/docs/current/static/sql-notify.html
+
+Notifications are received after every query execution. If the user is
+interested in receiving notifications but not in performing any query, the
+`~connection.poll()` method can be used to check for new messages without
+wasting resources.
+
+A simple application could poll the connection from time to time to check if
+something new has arrived. A better strategy is to use some I/O completion
+function such as :py:func:`~select.select` to sleep until awakened by the kernel when there is
+some data to read on the connection, thereby using no CPU unless there is
+something to read::
+
+ import select
+ import psycopg2
+ import psycopg2.extensions
+
+ conn = psycopg2.connect(DSN)
+ conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+
+ curs = conn.cursor()
+ curs.execute("LISTEN test;")
+
+ print "Waiting for notifications on channel 'test'"
+ while True:
+ if select.select([conn],[],[],5) == ([],[],[]):
+ print "Timeout"
+ else:
+ conn.poll()
+ while conn.notifies:
+ notify = conn.notifies.pop(0)
+ print "Got NOTIFY:", notify.pid, notify.channel, notify.payload
+
+Running the script and executing a command such as :sql:`NOTIFY test, 'hello'`
+in a separate :program:`psql` shell, the output may look similar to:
+
+.. code-block:: none
+
+ Waiting for notifications on channel 'test'
+ Timeout
+ Timeout
+ Got NOTIFY: 6535 test hello
+ Timeout
+ ...
+
+Note that the payload is only available from PostgreSQL 9.0: notifications
+received from a previous version server will have the
+`~psycopg2.extensions.Notify.payload` attribute set to the empty string.
+
+.. versionchanged:: 2.3
+ Added `~psycopg2.extensions.Notify` object and handling notification
+ payload.
+
+.. versionchanged:: 2.7
+ The `~connection.notifies` attribute is writable: it is possible to
+   replace it with any object exposing an `!append()` method. A useful
+ example would be to use a `~collections.deque` object.
+
+
+.. index::
+ double: Asynchronous; Connection
+
+.. _async-support:
+
+Asynchronous support
+--------------------
+
+.. versionadded:: 2.2
+
+Psycopg can issue asynchronous queries to a PostgreSQL database. An asynchronous
+communication style is established passing the parameter *async*\=1 to the
+`~psycopg2.connect()` function: the returned connection will work in
+*asynchronous mode*.
+
+In asynchronous mode, a Psycopg connection will rely on the caller to poll the
+socket file descriptor, checking if it is ready to accept data or if a query
+result has been transferred and is ready to be read on the client. The caller
+can use the method `~connection.fileno()` to get the connection file
+descriptor and `~connection.poll()` to make communication proceed according to
+the current connection state.
+
+The following is an example loop using methods `!fileno()` and `!poll()`
+together with the Python :py:func:`~select.select` function in order to carry on
+asynchronous operations with Psycopg::
+
+ def wait(conn):
+ while True:
+ state = conn.poll()
+ if state == psycopg2.extensions.POLL_OK:
+ break
+ elif state == psycopg2.extensions.POLL_WRITE:
+ select.select([], [conn.fileno()], [])
+ elif state == psycopg2.extensions.POLL_READ:
+ select.select([conn.fileno()], [], [])
+ else:
+ raise psycopg2.OperationalError("poll() returned %s" % state)
+
+The above loop of course would block an entire application: in a real
+asynchronous framework, `!select()` would be called on many file descriptors
+waiting for any of them to be ready. Nonetheless the function can be used to
+connect to a PostgreSQL server only using nonblocking commands and the
+connection obtained can be used to perform further nonblocking queries. After
+`!poll()` has returned `~psycopg2.extensions.POLL_OK`, and thus `!wait()` has
+returned, the connection can be safely used:
+
+ >>> aconn = psycopg2.connect(database='test', async=1)
+ >>> wait(aconn)
+ >>> acurs = aconn.cursor()
+
+Note that there are a few other requirements to be met in order to have a
+completely non-blocking connection attempt: see the libpq documentation for
+|PQconnectStart|_.
+
+.. |PQconnectStart| replace:: `!PQconnectStart()`
+.. _PQconnectStart: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNECTSTARTPARAMS
+
+The same loop should be also used to perform nonblocking queries: after
+sending a query via `~cursor.execute()` or `~cursor.callproc()`, call
+`!poll()` on the connection available from `cursor.connection` until it
+returns `!POLL_OK`, at which point the query has been completely sent to the
+server and, if it produced data, the results have been transferred to the
+client and available using the regular cursor methods:
+
+ >>> acurs.execute("SELECT pg_sleep(5); SELECT 42;")
+ >>> wait(acurs.connection)
+ >>> acurs.fetchone()[0]
+ 42
+
+When an asynchronous query is being executed, `connection.isexecuting()` returns
+`!True`. Two cursors can't execute concurrent queries on the same asynchronous
+connection.
+
+There are several limitations in using asynchronous connections: the
+connection is always in `~connection.autocommit` mode and it is not
+possible to change it. So a
+transaction is not implicitly started at the first query and it is not possible
+to use methods `~connection.commit()` and `~connection.rollback()`: you can
+manually control transactions using `~cursor.execute()` to send database
+commands such as :sql:`BEGIN`, :sql:`COMMIT` and :sql:`ROLLBACK`. Similarly
+`~connection.set_session()` can't be used but it is still possible to invoke the
+:sql:`SET` command with the proper :sql:`default_transaction_...` parameter.
+
+With asynchronous connections it is also not possible to use
+`~connection.set_client_encoding()`, `~cursor.executemany()`, :ref:`large
+objects <large-objects>`, :ref:`named cursors <server-side-cursors>`.
+
+:ref:`COPY commands <copy>` are not supported either in asynchronous mode, but
+this will be probably implemented in a future release.
+
+
+
+
+.. index::
+ single: Greenlet
+ single: Coroutine
+ single: Eventlet
+ single: gevent
+ single: Wait callback
+
+.. _green-support:
+
+Support for coroutine libraries
+-------------------------------
+
+.. versionadded:: 2.2
+
+Psycopg can be used together with coroutine_\-based libraries and participate
+in cooperative multithreading.
+
+Coroutine-based libraries (such as Eventlet_ or gevent_) can usually patch the
+Python standard library in order to enable a coroutine switch in the presence of
+blocking I/O: the process is usually referred to as making the system *green*, in
+reference to the `green threads`_.
+
+Because Psycopg is a C extension module, it is not possible for coroutine
+libraries to patch it: Psycopg instead enables cooperative multithreading by
+allowing the registration of a *wait callback* using the
+`psycopg2.extensions.set_wait_callback()` function. When a wait callback is
+registered, Psycopg will use `libpq non-blocking calls`__ instead of the regular
+blocking ones, and will delegate to the callback the responsibility to wait
+for the socket to become readable or writable.
+
+Working this way, the caller does not have the complete freedom to schedule the
+socket check whenever they want as with an :ref:`asynchronous connection
+<async-support>`, but has the advantage of maintaining a complete |DBAPI|
+semantics: from the point of view of the end user, all Psycopg functions and
+objects will work transparently in the coroutine environment (blocking the
+calling green thread and giving other green threads the possibility to be
+scheduled), allowing non modified code and third party libraries (such as
+SQLAlchemy_) to be used in coroutine-based programs.
+
+.. warning::
+ Psycopg connections are not *green thread safe* and can't be used
+ concurrently by different green threads. Trying to execute more than one
+ command at time using one cursor per thread will result in an error (or a
+ deadlock on versions before 2.4.2).
+
+ Therefore, programmers are advised to either avoid sharing connections
+ between coroutines or to use a library-friendly lock to synchronize shared
+ connections, e.g. for pooling.
+
+Coroutine libraries authors should provide a callback implementation (and
+possibly a method to register it) to make Psycopg as green as they want. An
+example callback (using `!select()` to block) is provided as
+`psycopg2.extras.wait_select()`: it boils down to something similar to::
+
+ def wait_select(conn):
+ while True:
+ state = conn.poll()
+ if state == extensions.POLL_OK:
+ break
+ elif state == extensions.POLL_READ:
+ select.select([conn.fileno()], [], [])
+ elif state == extensions.POLL_WRITE:
+ select.select([], [conn.fileno()], [])
+ else:
+ raise OperationalError("bad state from poll: %s" % state)
+
+Providing callback functions for the single coroutine libraries is out of
+psycopg2 scope, as the callback can be tied to the libraries' implementation
+details. You can check the `psycogreen`_ project for further information and
+resources about the topic.
+
+.. _coroutine: https://en.wikipedia.org/wiki/Coroutine
+.. _greenlet: https://pypi.org/project/greenlet/
+.. _green threads: https://en.wikipedia.org/wiki/Green_threads
+.. _Eventlet: https://eventlet.net/
+.. _gevent: http://www.gevent.org/
+.. _SQLAlchemy: https://www.sqlalchemy.org/
+.. _psycogreen: https://github.com/psycopg/psycogreen/
+.. __: https://www.postgresql.org/docs/current/static/libpq-async.html
+
+.. warning::
+
+ :ref:`COPY commands <copy>` are currently not supported when a wait callback
+ is registered, but they will be probably implemented in a future release.
+
+ :ref:`Large objects <large-objects>` are not supported either: they are
+ not compatible with asynchronous connections.
+
+
+.. testcode::
+ :hide:
+
+ aconn.close()
+ conn.rollback()
+ cur.execute("DROP TABLE atable")
+ conn.commit()
+ cur.close()
+ conn.close()
+
+
+
+.. index::
+ single: Replication
+
+.. _replication-support:
+
+Replication protocol support
+----------------------------
+
+.. versionadded:: 2.7
+
+Modern PostgreSQL servers (version 9.0 and above) support replication. The
+replication protocol is built on top of the client-server protocol and can be
+operated using ``libpq``, as such it can be also operated by ``psycopg2``.
+The replication protocol can be operated on both synchronous and
+:ref:`asynchronous <async-support>` connections.
+
+Server version 9.4 adds a new feature called *Logical Replication*.
+
+.. seealso::
+
+ - PostgreSQL `Streaming Replication Protocol`__
+
+ .. __: https://www.postgresql.org/docs/current/static/protocol-replication.html
+
+
+Logical replication Quick-Start
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You must be using PostgreSQL server version 9.4 or above to run this quick
+start.
+
+Make sure that replication connections are permitted for user ``postgres`` in
+``pg_hba.conf`` and reload the server configuration. You also need to set
+``wal_level=logical`` and ``max_wal_senders``, ``max_replication_slots`` to
+value greater than zero in ``postgresql.conf`` (these changes require a server
+restart). Create a database ``psycopg2_test``.
+
+Then run the following code to quickly try the replication support out. This
+is not production code -- it's only intended as a simple demo of logical
+replication::
+
+ from __future__ import print_function
+ import sys
+ import psycopg2
+ import psycopg2.extras
+
+ conn = psycopg2.connect('dbname=psycopg2_test user=postgres',
+ connection_factory=psycopg2.extras.LogicalReplicationConnection)
+ cur = conn.cursor()
+ try:
+ # test_decoding produces textual output
+ cur.start_replication(slot_name='pytest', decode=True)
+ except psycopg2.ProgrammingError:
+ cur.create_replication_slot('pytest', output_plugin='test_decoding')
+ cur.start_replication(slot_name='pytest', decode=True)
+
+ class DemoConsumer(object):
+ def __call__(self, msg):
+ print(msg.payload)
+ msg.cursor.send_feedback(flush_lsn=msg.data_start)
+
+ democonsumer = DemoConsumer()
+
+ print("Starting streaming, press Control-C to end...", file=sys.stderr)
+ try:
+ cur.consume_stream(democonsumer)
+ except KeyboardInterrupt:
+ cur.close()
+ conn.close()
+ print("The slot 'pytest' still exists. Drop it with "
+ "SELECT pg_drop_replication_slot('pytest'); if no longer needed.",
+ file=sys.stderr)
+ print("WARNING: Transaction logs will accumulate in pg_xlog "
+ "until the slot is dropped.", file=sys.stderr)
+
+
+You can now make changes to the ``psycopg2_test`` database using a normal
+psycopg2 session, ``psql``, etc. and see the logical decoding stream printed
+by this demo client.
+
+This will continue running until terminated with ``Control-C``.
+
+For the details see :ref:`replication-objects`.
diff --git a/doc/src/conf.py b/doc/src/conf.py
new file mode 100644
index 0000000..c40c493
--- /dev/null
+++ b/doc/src/conf.py
@@ -0,0 +1,288 @@
+#
+# Psycopg documentation build configuration file, created by
+# sphinx-quickstart on Sun Feb 7 13:48:41 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import os
+import sys
+from better import better_theme_path
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.append(os.path.abspath('tools/lib'))
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.ifconfig',
+ 'sphinx.ext.doctest',
+ 'sphinx.ext.intersphinx',
+]
+
+# Specific extensions for Psycopg documentation.
+extensions += ['dbapi_extension', 'sql_role', 'ticket_role']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+# source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'Psycopg'
+copyright = (
+ '2001-2021, Federico Di Gregorio, Daniele Varrazzo, The Psycopg Team'
+)
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '2.0'
+
+# The full version, including alpha/beta/rc tags.
+try:
+ import psycopg2
+except ImportError:
+ print("WARNING: couldn't import psycopg to read version.")
+ release = version
+else:
+ release = psycopg2.__version__.split()[0]
+ version = '.'.join(release.split('.')[:2])
+
+intersphinx_mapping = {'py': ('https://docs.python.org/3', None)}
+
+# Pattern to generate links to the bug tracker
+ticket_url = 'https://github.com/psycopg/psycopg2/issues/%s'
+ticket_remap_until = 25
+ticket_remap_offset = 230
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+# language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+# unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['_build', 'html']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+default_role = 'obj'
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# Using 'python' instead of the default gives warnings if parsing an example
+# fails, instead of defaulting to none
+highlight_language = 'python'
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# Include TODO items in the documentation
+todo_include_todos = False
+
+rst_epilog = """
+.. |DBAPI| replace:: DB API 2.0
+
+.. _DBAPI: https://www.python.org/dev/peps/pep-0249/
+
+.. _transaction isolation level:
+ https://www.postgresql.org/docs/current/static/transaction-iso.html
+
+.. |MVCC| replace:: :abbr:`MVCC (Multiversion concurrency control)`
+"""
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'better'
+
+# The stylesheet to use with HTML output: this will include the original one
+# adding a few classes.
+# html_style = 'psycopg.css'
+
+# Hide the sphinx footer
+html_show_sphinx = False
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {
+ 'linktotheme': False,
+ 'cssfiles': ['_static/psycopg.css'],
+}
+
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = [better_theme_path]
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+html_short_title = 'Home'
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# no need for the prev/next topic link using better theme: they are on top
+html_sidebars = {
+ '**': ['localtoc.html', 'searchbox.html'],
+}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_use_modindex = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'psycopgdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+# latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+# latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ (
+ 'index',
+ 'psycopg.tex',
+ 'Psycopg Documentation',
+ 'Federico Di Gregorio',
+ 'manual',
+ )
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+# latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_use_modindex = True
+
+
+doctest_global_setup = """
+
+import os
+import psycopg2
+
+def test_connect():
+ try:
+ dsn = os.environ['PSYCOPG2_DSN']
+ except KeyError:
+ assert False, "You need to set the environment variable PSYCOPG2_DSN" \
+ " in order to test the documentation!"
+ return psycopg2.connect(dsn)
+
+conn = test_connect()
+cur = conn.cursor()
+
+def drop_test_table(name):
+ cur.execute("SAVEPOINT drop_test_table;")
+ try:
+ cur.execute("DROP TABLE %s;" % name)
+ except:
+ cur.execute("ROLLBACK TO SAVEPOINT drop_test_table;")
+ conn.commit()
+
+def create_test_table():
+ drop_test_table('test')
+ cur.execute("CREATE TABLE test (id SERIAL PRIMARY KEY, num INT, data TEXT)")
+ conn.commit()
+
+"""
diff --git a/doc/src/connection.rst b/doc/src/connection.rst
new file mode 100644
index 0000000..05ad140
--- /dev/null
+++ b/doc/src/connection.rst
@@ -0,0 +1,916 @@
+The ``connection`` class
+========================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+.. testsetup::
+
+ from pprint import pprint
+ import psycopg2.extensions
+
+ drop_test_table('foo')
+
+.. class:: connection
+
+ Handles the connection to a PostgreSQL database instance. It encapsulates
+ a database session.
+
+ Connections are created using the factory function
+ `~psycopg2.connect()`.
+
+ Connections are thread safe and can be shared among many threads. See
+ :ref:`thread-safety` for details.
+
+ Connections can be used as context managers. Note that a context wraps a
+ transaction: if the context exits with success the transaction is
+ committed, if it exits with an exception the transaction is rolled back.
+ Note that the connection is not closed by the context and it can be used
+ for several contexts.
+
+ .. code:: python
+
+ conn = psycopg2.connect(DSN)
+
+ with conn:
+ with conn.cursor() as curs:
+ curs.execute(SQL1)
+
+ with conn:
+ with conn.cursor() as curs:
+ curs.execute(SQL2)
+
+ # leaving contexts doesn't close the connection
+ conn.close()
+
+
+ .. method:: cursor(name=None, cursor_factory=None, scrollable=None, withhold=False)
+
+ Return a new `cursor` object using the connection.
+
+ If *name* is specified, the returned cursor will be a :ref:`server
+ side cursor <server-side-cursors>` (also known as *named cursor*).
+ Otherwise it will be a regular *client side* cursor. By default a
+ named cursor is declared without :sql:`SCROLL` option and
+ :sql:`WITHOUT HOLD`: set the argument or property `~cursor.scrollable`
+      to `!True`/`!False` and/or `~cursor.withhold` to `!True` to change the
+ declaration.
+
+ The name can be a string not valid as a PostgreSQL identifier: for
+ example it may start with a digit and contain non-alphanumeric
+ characters and quotes.
+
+ .. versionchanged:: 2.4
+ previously only valid PostgreSQL identifiers were accepted as
+ cursor name.
+
+ The *cursor_factory* argument can be used to create non-standard
+ cursors. The class returned must be a subclass of
+ `psycopg2.extensions.cursor`. See :ref:`subclassing-cursor` for
+ details. A default factory for the connection can also be specified
+ using the `~connection.cursor_factory` attribute.
+
+ .. versionchanged:: 2.4.3 added the *withhold* argument.
+ .. versionchanged:: 2.5 added the *scrollable* argument.
+
+ .. extension::
+
+ All the function arguments are Psycopg extensions to the |DBAPI|.
+
+
+ .. index::
+ pair: Transaction; Commit
+
+ .. method:: commit()
+
+ Commit any pending transaction to the database.
+
+ By default, Psycopg opens a transaction before executing the first
+ command: if `!commit()` is not called, the effect of any data
+ manipulation will be lost.
+
+ The connection can be also set in "autocommit" mode: no transaction is
+ automatically open, commands have immediate effect. See
+ :ref:`transactions-control` for details.
+
+ .. versionchanged:: 2.5 if the connection is used in a ``with``
+ statement, the method is automatically called if no exception is
+ raised in the ``with`` block.
+
+
+ .. index::
+ pair: Transaction; Rollback
+
+ .. method:: rollback()
+
+ Roll back to the start of any pending transaction. Closing a
+ connection without committing the changes first will cause an implicit
+ rollback to be performed.
+
+ .. versionchanged:: 2.5 if the connection is used in a ``with``
+ statement, the method is automatically called if an exception is
+ raised in the ``with`` block.
+
+
+ .. method:: close()
+
+ Close the connection now (rather than whenever `del` is executed).
+ The connection will be unusable from this point forward; an
+ `~psycopg2.InterfaceError` will be raised if any operation is
+ attempted with the connection. The same applies to all cursor objects
+ trying to use the connection. Note that closing a connection without
+ committing the changes first will cause any pending change to be
+ discarded as if a :sql:`ROLLBACK` was performed (unless a different
+ isolation level has been selected: see
+ `~connection.set_isolation_level()`).
+
+ .. index::
+ single: PgBouncer; unclean server
+
+ .. versionchanged:: 2.2
+ previously an explicit :sql:`ROLLBACK` was issued by Psycopg on
+ `!close()`. The command could have been sent to the backend at an
+ inappropriate time, so Psycopg currently relies on the backend to
+ implicitly discard uncommitted changes. Some middleware are known
+ to behave incorrectly though when the connection is closed during
+ a transaction (when `~connection.status` is
+ `~psycopg2.extensions.STATUS_IN_TRANSACTION`), e.g. PgBouncer_
+ reports an ``unclean server`` and discards the connection. To
+ avoid this problem you can ensure to terminate the transaction
+ with a `~connection.commit()`/`~connection.rollback()` before
+ closing.
+
+ .. _PgBouncer: http://www.pgbouncer.org/
+
+
+ .. index::
+ single: Exceptions; In the connection class
+
+ .. rubric:: Exceptions as connection class attributes
+
+ The `!connection` also exposes as attributes the same exceptions
+ available in the `psycopg2` module. See :ref:`dbapi-exceptions`.
+
+
+
+ .. index::
+ single: Two-phase commit; methods
+
+ .. rubric:: Two-phase commit support methods
+
+ .. versionadded:: 2.3
+
+ .. seealso:: :ref:`tpc` for an introductory explanation of these methods.
+
+ Note that PostgreSQL supports two-phase commit since release 8.1: these
+   methods raise `~psycopg2.NotSupportedError` if used with an older server
+   version.
+
+
+ .. _tpc_methods:
+
+ .. method:: xid(format_id, gtrid, bqual)
+
+ Returns a `~psycopg2.extensions.Xid` instance to be passed to the
+ `!tpc_*()` methods of this connection. The argument types and
+ constraints are explained in :ref:`tpc`.
+
+ The values passed to the method will be available on the returned
+ object as the members `~psycopg2.extensions.Xid.format_id`,
+ `~psycopg2.extensions.Xid.gtrid`, `~psycopg2.extensions.Xid.bqual`.
+ The object also allows accessing to these members and unpacking as a
+ 3-items tuple.
+
+
+ .. method:: tpc_begin(xid)
+
+ Begins a TPC transaction with the given transaction ID *xid*.
+
+ This method should be called outside of a transaction (i.e. nothing
+ may have executed since the last `~connection.commit()` or
+ `~connection.rollback()` and `connection.status` is
+ `~psycopg2.extensions.STATUS_READY`).
+
+ Furthermore, it is an error to call `!commit()` or `!rollback()`
+ within the TPC transaction: in this case a `~psycopg2.ProgrammingError`
+ is raised.
+
+ The *xid* may be either an object returned by the `~connection.xid()`
+        method or a plain string: the latter allows creating a transaction
+ using the provided string as PostgreSQL transaction id. See also
+ `~connection.tpc_recover()`.
+
+
+ .. index::
+ pair: Transaction; Prepare
+
+ .. method:: tpc_prepare()
+
+ Performs the first phase of a transaction started with
+ `~connection.tpc_begin()`. A `~psycopg2.ProgrammingError` is raised if
+ this method is used outside of a TPC transaction.
+
+ After calling `!tpc_prepare()`, no statements can be executed until
+ `~connection.tpc_commit()` or `~connection.tpc_rollback()` will be
+ called. The `~connection.reset()` method can be used to restore the
+ status of the connection to `~psycopg2.extensions.STATUS_READY`: the
+ transaction will remain prepared in the database and will be
+ possible to finish it with `!tpc_commit(xid)` and
+ `!tpc_rollback(xid)`.
+
+ .. seealso:: the |PREPARE TRANSACTION|_ PostgreSQL command.
+
+ .. |PREPARE TRANSACTION| replace:: :sql:`PREPARE TRANSACTION`
+ .. _PREPARE TRANSACTION: https://www.postgresql.org/docs/current/static/sql-prepare-transaction.html
+
+
+ .. index::
+ pair: Commit; Prepared
+
+ .. method:: tpc_commit([xid])
+
+ When called with no arguments, `!tpc_commit()` commits a TPC
+ transaction previously prepared with `~connection.tpc_prepare()`.
+
+ If `!tpc_commit()` is called prior to `!tpc_prepare()`, a single phase
+ commit is performed. A transaction manager may choose to do this if
+ only a single resource is participating in the global transaction.
+
+ When called with a transaction ID *xid*, the database commits
+ the given transaction. If an invalid transaction ID is
+ provided, a `~psycopg2.ProgrammingError` will be raised. This form
+ should be called outside of a transaction, and is intended for use in
+ recovery.
+
+ On return, the TPC transaction is ended.
+
+ .. seealso:: the |COMMIT PREPARED|_ PostgreSQL command.
+
+ .. |COMMIT PREPARED| replace:: :sql:`COMMIT PREPARED`
+ .. _COMMIT PREPARED: https://www.postgresql.org/docs/current/static/sql-commit-prepared.html
+
+
+ .. index::
+ pair: Rollback; Prepared
+
+ .. method:: tpc_rollback([xid])
+
+ When called with no arguments, `!tpc_rollback()` rolls back a TPC
+ transaction. It may be called before or after
+ `~connection.tpc_prepare()`.
+
+ When called with a transaction ID *xid*, it rolls back the given
+ transaction. If an invalid transaction ID is provided, a
+ `~psycopg2.ProgrammingError` is raised. This form should be called
+ outside of a transaction, and is intended for use in recovery.
+
+ On return, the TPC transaction is ended.
+
+ .. seealso:: the |ROLLBACK PREPARED|_ PostgreSQL command.
+
+ .. |ROLLBACK PREPARED| replace:: :sql:`ROLLBACK PREPARED`
+ .. _ROLLBACK PREPARED: https://www.postgresql.org/docs/current/static/sql-rollback-prepared.html
+
+
+ .. index::
+ pair: Transaction; Recover
+
+ .. method:: tpc_recover()
+
+ Returns a list of `~psycopg2.extensions.Xid` representing pending
+ transactions, suitable for use with `tpc_commit()` or
+ `tpc_rollback()`.
+
+ If a transaction was not initiated by Psycopg, the returned Xids will
+ have attributes `~psycopg2.extensions.Xid.format_id` and
+ `~psycopg2.extensions.Xid.bqual` set to `!None` and the
+ `~psycopg2.extensions.Xid.gtrid` set to the PostgreSQL transaction ID: such Xids are still
+        usable for recovery. Psycopg uses the same algorithm as the
+ `PostgreSQL JDBC driver`__ to encode a XA triple in a string, so
+ transactions initiated by a program using such driver should be
+ unpacked correctly.
+
+ .. __: https://jdbc.postgresql.org/
+
+ Xids returned by `!tpc_recover()` also have extra attributes
+ `~psycopg2.extensions.Xid.prepared`, `~psycopg2.extensions.Xid.owner`,
+ `~psycopg2.extensions.Xid.database` populated with the values read
+ from the server.
+
+ .. seealso:: the |pg_prepared_xacts|_ system view.
+
+ .. |pg_prepared_xacts| replace:: `pg_prepared_xacts`
+ .. _pg_prepared_xacts: https://www.postgresql.org/docs/current/static/view-pg-prepared-xacts.html
+
+
+
+ .. extension::
+
+ The above methods are the only ones defined by the |DBAPI| protocol.
+        The Psycopg connection object exports the following additional
+ methods and attributes.
+
+
+ .. attribute:: closed
+
+ Read-only integer attribute: 0 if the connection is open, nonzero if
+ it is closed or broken.
+
+
+ .. method:: cancel
+
+ Cancel the current database operation.
+
+ The method interrupts the processing of the current operation. If no
+ query is being executed, it does nothing. You can call this function
+ from a different thread than the one currently executing a database
+ operation, for instance if you want to cancel a long running query if a
+ button is pushed in the UI. Interrupting query execution will cause the
+ cancelled method to raise a
+ `~psycopg2.extensions.QueryCanceledError`. Note that the termination
+ of the query is not guaranteed to succeed: see the documentation for
+ |PQcancel|_.
+
+ .. |PQcancel| replace:: `!PQcancel()`
+ .. _PQcancel: https://www.postgresql.org/docs/current/static/libpq-cancel.html#LIBPQ-PQCANCEL
+
+ .. versionadded:: 2.3
+
+
+ .. method:: reset
+
+ Reset the connection to the default.
+
+ The method rolls back an eventual pending transaction and executes the
+ PostgreSQL |RESET|_ and |SET SESSION AUTHORIZATION|__ to revert the
+ session to the default values. A two-phase commit transaction prepared
+ using `~connection.tpc_prepare()` will remain in the database
+        available for recovery.
+
+ .. |RESET| replace:: :sql:`RESET`
+ .. _RESET: https://www.postgresql.org/docs/current/static/sql-reset.html
+
+ .. |SET SESSION AUTHORIZATION| replace:: :sql:`SET SESSION AUTHORIZATION`
+ .. __: https://www.postgresql.org/docs/current/static/sql-set-session-authorization.html
+
+ .. versionadded:: 2.0.12
+
+
+ .. attribute:: dsn
+
+ Read-only string containing the connection string used by the
+ connection.
+
+ If a password was specified in the connection string it will be
+ obscured.
+
+
+
+ .. rubric:: Transaction control methods and attributes.
+
+ .. index::
+ pair: Transaction; Autocommit
+ pair: Transaction; Isolation level
+
+ .. method:: set_session(isolation_level=None, readonly=None, deferrable=None, autocommit=None)
+
+ Set one or more parameters for the next transactions or statements in
+ the current session.
+
+ :param isolation_level: set the `isolation level`_ for the next
+ transactions/statements. The value can be one of the literal
+ values ``READ UNCOMMITTED``, ``READ COMMITTED``, ``REPEATABLE
+ READ``, ``SERIALIZABLE`` or the equivalent :ref:`constant
+ <isolation-level-constants>` defined in the `~psycopg2.extensions`
+ module.
+ :param readonly: if `!True`, set the connection to read only;
+ read/write if `!False`.
+ :param deferrable: if `!True`, set the connection to deferrable;
+ non deferrable if `!False`. Only available from PostgreSQL 9.1.
+ :param autocommit: switch the connection to autocommit mode: not a
+ PostgreSQL session setting but an alias for setting the
+ `autocommit` attribute.
+
+ .. _isolation level:
+ https://www.postgresql.org/docs/current/static/transaction-iso.html
+
+ Arguments set to `!None` (the default for all) will not be changed.
+ The parameters *isolation_level*, *readonly* and *deferrable* also
+ accept the string ``DEFAULT`` as a value: the effect is to reset the
+ parameter to the server default. Defaults are defined by the server
+ configuration: see values for |default_transaction_isolation|__,
+ |default_transaction_read_only|__, |default_transaction_deferrable|__.
+
+ .. |default_transaction_isolation| replace:: :sql:`default_transaction_isolation`
+ .. __: https://www.postgresql.org/docs/current/static/runtime-config-client.html#GUC-DEFAULT-TRANSACTION-ISOLATION
+ .. |default_transaction_read_only| replace:: :sql:`default_transaction_read_only`
+ .. __: https://www.postgresql.org/docs/current/static/runtime-config-client.html#GUC-DEFAULT-TRANSACTION-READ-ONLY
+ .. |default_transaction_deferrable| replace:: :sql:`default_transaction_deferrable`
+ .. __: https://www.postgresql.org/docs/current/static/runtime-config-client.html#GUC-DEFAULT-TRANSACTION-DEFERRABLE
+
+ The function must be invoked with no transaction in progress.
+
+ .. seealso:: |SET TRANSACTION|_ for further details about the behaviour
+ of the transaction parameters in the server.
+
+ .. |SET TRANSACTION| replace:: :sql:`SET TRANSACTION`
+ .. _SET TRANSACTION: https://www.postgresql.org/docs/current/static/sql-set-transaction.html
+
+ .. versionadded:: 2.4.2
+
+ .. versionchanged:: 2.7
+ Before this version, the function would have set
+ :sql:`default_transaction_*` attribute in the current session;
+ this implementation has the problem of not playing well with
+ external connection pooling working at transaction level and not
+ resetting the state of the session: changing the default
+ transaction would pollute the connections in the pool and create
+ problems to other applications using the same pool.
+
+ Starting from 2.7, if the connection is not autocommit, the
+ transaction characteristics are issued together with :sql:`BEGIN`
+ and will leave the :sql:`default_transaction_*` settings untouched.
+ For example::
+
+ conn.set_session(readonly=True)
+
+ will not change :sql:`default_transaction_read_only`, but
+ following transaction will start with a :sql:`BEGIN READ ONLY`.
+ Conversely, using::
+
+ conn.set_session(readonly=True, autocommit=True)
+
+ will set :sql:`default_transaction_read_only` to :sql:`on` and
+ rely on the server to apply the read only state to whatever
+ transaction, implicit or explicit, is executed in the connection.
+
+
+ .. attribute:: autocommit
+
+ Read/write attribute: if `!True`, no transaction is handled by the
+ driver and every statement sent to the backend has immediate effect;
+ if `!False` a new transaction is started at the first command
+ execution: the methods `commit()` or `rollback()` must be manually
+ invoked to terminate the transaction.
+
+ The autocommit mode is useful to execute commands requiring to be run
+ outside a transaction, such as :sql:`CREATE DATABASE` or
+ :sql:`VACUUM`.
+
+ The default is `!False` (manual commit) as per DBAPI specification.
+
+ .. warning::
+
+ By default, any query execution, including a simple :sql:`SELECT`
+ will start a transaction: for long-running programs, if no further
+ action is taken, the session will remain "idle in transaction", an
+ undesirable condition for several reasons (locks are held by
+ the session, tables bloat...). For long lived scripts, either
+ ensure to terminate a transaction as soon as possible or use an
+ autocommit connection.
+
+ .. versionadded:: 2.4.2
+
+
+ .. attribute:: isolation_level
+
+ Return or set the `transaction isolation level`_ for the current
+ session. The value is one of the :ref:`isolation-level-constants`
+ defined in the `psycopg2.extensions` module. On set it is also
+ possible to use one of the literal values ``READ UNCOMMITTED``, ``READ
+ COMMITTED``, ``REPEATABLE READ``, ``SERIALIZABLE``, ``DEFAULT``.
+
+ .. versionchanged:: 2.7
+
+ the property is writable.
+
+ .. versionchanged:: 2.7
+
+ the default value for `!isolation_level` is
+ `~psycopg2.extensions.ISOLATION_LEVEL_DEFAULT`; previously the
+ property would have queried the server and returned the real value
+ applied. To know this value you can run a query such as :sql:`show
+ transaction_isolation`. Usually the default value is `READ
+ COMMITTED`, but this may be changed in the server configuration.
+
+ This value is now entirely separate from the `autocommit`
+ property: in previous version, if `!autocommit` was set to `!True`
+ this property would have returned
+ `~psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT`; it will now
+ return the server isolation level.
+
+
+ .. attribute:: readonly
+
+ Return or set the read-only status for the current session. Available
+ values are `!True` (new transactions will be in read-only mode),
+ `!False` (new transactions will be writable), `!None` (use the default
+ configured for the server by :sql:`default_transaction_read_only`).
+
+ .. versionadded:: 2.7
+
+
+ .. attribute:: deferrable
+
+ Return or set the `deferrable status`__ for the current session.
+ Available values are `!True` (new transactions will be in deferrable
+ mode), `!False` (new transactions will be in non deferrable mode),
+ `!None` (use the default configured for the server by
+ :sql:`default_transaction_deferrable`).
+
+ .. __: `SET TRANSACTION`_
+
+ .. versionadded:: 2.7
+
+
+ .. method:: set_isolation_level(level)
+
+ .. note::
+
+ This is a legacy method mixing `~conn.isolation_level` and
+ `~conn.autocommit`. Using the respective properties is a better
+ option.
+
+ Set the `transaction isolation level`_ for the current session.
+ The level defines the different phenomena that can happen in the
+ database between concurrent transactions.
+
+ The value set is an integer: symbolic constants are defined in
+ the module `psycopg2.extensions`: see
+ :ref:`isolation-level-constants` for the available values.
+
+ The default level is `~psycopg2.extensions.ISOLATION_LEVEL_DEFAULT`:
+ at this level a transaction is automatically started the first time a
+ database command is executed. If you want an *autocommit* mode,
+ switch to `~psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT` before
+ executing any command::
+
+ >>> conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+
+ See also :ref:`transactions-control`.
+
+
+ .. index::
+ pair: Client; Encoding
+
+ .. attribute:: encoding
+ .. method:: set_client_encoding(enc)
+
+ Read or set the client encoding for the current session. The default
+ is the encoding defined by the database. It should be one of the
+        `character sets supported by PostgreSQL`__.
+
+ .. __: https://www.postgresql.org/docs/current/static/multibyte.html
+
+
+ .. index::
+ pair: Client; Logging
+
+ .. attribute:: notices
+
+ A list containing all the database messages sent to the client during
+ the session.
+
+ .. doctest::
+ :options: +NORMALIZE_WHITESPACE
+
+ >>> cur.execute("CREATE TABLE foo (id serial PRIMARY KEY);")
+ >>> pprint(conn.notices)
+ ['NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "foo_pkey" for table "foo"\n',
+ 'NOTICE: CREATE TABLE will create implicit sequence "foo_id_seq" for serial column "foo.id"\n']
+
+ .. versionchanged:: 2.7
+ The `!notices` attribute is writable: the user may replace it
+ with any Python object exposing an `!append()` method. If
+ appending raises an exception the notice is silently
+ dropped.
+
+ To avoid a leak in case excessive notices are generated, only the last
+ 50 messages are kept. This check is only in place if the `!notices`
+ attribute is a list: if any other object is used it will be up to the
+ user to guard from leakage.
+
+ You can configure what messages to receive using `PostgreSQL logging
+ configuration parameters`__ such as ``log_statement``,
+ ``client_min_messages``, ``log_min_duration_statement`` etc.
+
+ .. __: https://www.postgresql.org/docs/current/static/runtime-config-logging.html
+
+
+ .. attribute:: notifies
+
+ List of `~psycopg2.extensions.Notify` objects containing asynchronous
+ notifications received by the session.
+
+ For other details see :ref:`async-notify`.
+
+ .. versionchanged:: 2.3
+ Notifications are instances of the `!Notify` object. Previously the
+ list was composed by 2 items tuples :samp:`({pid},{channel})` and
+ the payload was not accessible. To keep backward compatibility,
+ `!Notify` objects can still be accessed as 2 items tuples.
+
+ .. versionchanged:: 2.7
+ The `!notifies` attribute is writable: the user may replace it
+ with any Python object exposing an `!append()` method. If
+ appending raises an exception the notification is silently
+ dropped.
+
+
+ .. attribute:: cursor_factory
+
+ The default cursor factory used by `~connection.cursor()` if the
+ parameter is not specified.
+
+ .. versionadded:: 2.5
+
+
+ .. index::
+ pair: Connection; Info
+
+ .. attribute:: info
+
+ A `~psycopg2.extensions.ConnectionInfo` object exposing information
+ about the native libpq connection.
+
+ .. versionadded:: 2.8
+
+
+ .. index::
+ pair: Connection; Status
+
+ .. attribute:: status
+
+ A read-only integer representing the status of the connection.
+ Symbolic constants for the values are defined in the module
+ `psycopg2.extensions`: see :ref:`connection-status-constants`
+ for the available values.
+
+ The status is undefined for `closed` connections.
+
+
+ .. method:: lobject([oid [, mode [, new_oid [, new_file [, lobject_factory]]]]])
+
+ Return a new database large object as a `~psycopg2.extensions.lobject`
+ instance.
+
+ See :ref:`large-objects` for an overview.
+
+ :param oid: The OID of the object to read or write. 0 to create
+ a new large object and have its OID assigned automatically.
+ :param mode: Access mode to the object, see below.
+ :param new_oid: Create a new object using the specified OID. The
+ function raises `~psycopg2.OperationalError` if the OID is already
+ in use. Default is 0, meaning assign a new one automatically.
+ :param new_file: The name of a file to be imported in the database
+ (using the |lo_import|_ function)
+ :param lobject_factory: Subclass of
+ `~psycopg2.extensions.lobject` to be instantiated.
+
+ .. |lo_import| replace:: `!lo_import()`
+ .. _lo_import: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-IMPORT
+
+ Available values for *mode* are:
+
+ ======= =========
+ *mode* meaning
+ ======= =========
+ ``r`` Open for read only
+ ``w`` Open for write only
+ ``rw`` Open for read/write
+ ``n`` Don't open the file
+ ``b`` Don't decode read data (return data as `!str` in Python 2 or `!bytes` in Python 3)
+ ``t`` Decode read data according to `connection.encoding` (return data as `!unicode` in Python 2 or `!str` in Python 3)
+ ======= =========
+
+ ``b`` and ``t`` can be specified together with a read/write mode. If
+ neither ``b`` nor ``t`` is specified, the default is ``b`` in Python 2
+ and ``t`` in Python 3.
+
+ .. versionadded:: 2.0.8
+
+ .. versionchanged:: 2.4 added ``b`` and ``t`` mode and unicode
+ support.
+
+
+ .. rubric:: Methods related to asynchronous support
+
+ .. versionadded:: 2.2
+
+ .. seealso:: :ref:`async-support` and :ref:`green-support`.
+
+
+ .. attribute:: async
+ async_
+
+ Read only attribute: 1 if the connection is asynchronous, 0 otherwise.
+
+ .. versionchanged:: 2.7 added the `!async_` alias for Python versions
+ where `!async` is a keyword.
+
+
+ .. method:: poll()
+
+ Used during an asynchronous connection attempt, or when a cursor is
+ executing a query on an asynchronous connection, make communication
+ proceed if it wouldn't block.
+
+ Return one of the constants defined in :ref:`poll-constants`. If it
+ returns `~psycopg2.extensions.POLL_OK` then the connection has been
+ established or the query results are available on the client.
+ Otherwise wait until the file descriptor returned by `fileno()` is
+ ready to read or to write, as explained in :ref:`async-support`.
+ `poll()` should be also used by the function installed by
+ `~psycopg2.extensions.set_wait_callback()` as explained in
+ :ref:`green-support`.
+
+ `poll()` is also used to receive asynchronous notifications from the
+ database: see :ref:`async-notify` for further details.
+
+
+ .. method:: fileno()
+
+ Return the file descriptor underlying the connection: useful to read
+ its status during asynchronous communication.
+
+
+ .. method:: isexecuting()
+
+ Return `!True` if the connection is executing an asynchronous operation.
+
+
+ .. rubric:: Interoperation with other C API modules
+
+ .. attribute:: pgconn_ptr
+
+ Return the internal `!PGconn*` as integer. Useful to pass the libpq
+ raw connection structure to C functions, e.g. via `ctypes`::
+
+ >>> import ctypes
+ >>> import ctypes.util
+ >>> libpq = ctypes.pydll.LoadLibrary(ctypes.util.find_library('pq'))
+ >>> libpq.PQserverVersion.argtypes = [ctypes.c_void_p]
+ >>> libpq.PQserverVersion.restype = ctypes.c_int
+ >>> libpq.PQserverVersion(conn.pgconn_ptr)
+ 90611
+
+ .. versionadded:: 2.8
+
+
+ .. method:: get_native_connection()
+
+ Return the internal `!PGconn*` wrapped in a PyCapsule object. This is
+ only useful for passing the `libpq` raw connection associated to this
+ connection object to other C-level modules that may have a use for it.
+
+ .. seealso:: Python C API `Capsules`__ docs.
+
+ .. __: https://docs.python.org/3.1/c-api/capsule.html
+
+ .. versionadded:: 2.8
+
+
+
+ .. rubric:: informative methods of the native connection
+
+ .. note::
+
+ These methods are better accessed using the `~connection.info`
+ attributes and may be dropped in future versions.
+
+
+ .. index::
+ pair: Transaction; Status
+
+ .. method:: get_transaction_status()
+
+ Also available as `~connection.info`\ `!.`\
+ `~psycopg2.extensions.ConnectionInfo.transaction_status`.
+
+ Return the current session transaction status as an integer. Symbolic
+ constants for the values are defined in the module
+ `psycopg2.extensions`: see :ref:`transaction-status-constants`
+ for the available values.
+
+ .. seealso:: libpq docs for `PQtransactionStatus()`__ for details.
+
+ .. __: https://www.postgresql.org/docs/current/static/libpq-status.html#LIBPQ-PQTRANSACTIONSTATUS
+
+
+ .. index::
+ pair: Protocol; Version
+
+ .. attribute:: protocol_version
+
+ Also available as `~connection.info`\ `!.`\
+ `~psycopg2.extensions.ConnectionInfo.protocol_version`.
+
+ A read-only integer representing frontend/backend protocol being used.
+ Currently Psycopg supports only protocol 3, which allows connection
+ to PostgreSQL server from version 7.4. Psycopg versions prior to
+ 2.3 support both protocols 2 and 3.
+
+ .. seealso:: libpq docs for `PQprotocolVersion()`__ for details.
+
+ .. __: https://www.postgresql.org/docs/current/static/libpq-status.html#LIBPQ-PQPROTOCOLVERSION
+
+ .. versionadded:: 2.0.12
+
+
+ .. index::
+ pair: Server; Version
+
+ .. attribute:: server_version
+
+ Also available as `~connection.info`\ `!.`\
+ `~psycopg2.extensions.ConnectionInfo.server_version`.
+
+ A read-only integer representing the backend version.
+
+ The number is formed by converting the major, minor, and revision
+ numbers into two-decimal-digit numbers and appending them together.
+ For example, version 8.1.5 will be returned as ``80105``.
+
+ .. seealso:: libpq docs for `PQserverVersion()`__ for details.
+
+ .. __: https://www.postgresql.org/docs/current/static/libpq-status.html#LIBPQ-PQSERVERVERSION
+
+ .. versionadded:: 2.0.12
+
+
+ .. index::
+ pair: Backend; PID
+
+ .. method:: get_backend_pid()
+
+ Also available as `~connection.info`\ `!.`\
+ `~psycopg2.extensions.ConnectionInfo.backend_pid`.
+
+ Returns the process ID (PID) of the backend server process *you
+ connected to*. Note that if you use a connection pool service such as
+ PgBouncer_ this value will not be updated if your connection is
+ switched to a different backend.
+
+ Note that the PID belongs to a process executing on the database
+ server host, not the local host!
+
+ .. seealso:: libpq docs for `PQbackendPID()`__ for details.
+
+ .. __: https://www.postgresql.org/docs/current/static/libpq-status.html#LIBPQ-PQBACKENDPID
+
+ .. versionadded:: 2.0.8
+
+
+ .. index::
+ pair: Server; Parameters
+
+ .. method:: get_parameter_status(parameter)
+
+ Also available as `~connection.info`\ `!.`\
+ `~psycopg2.extensions.ConnectionInfo.parameter_status()`.
+
+ Look up a current parameter setting of the server.
+
+ Potential values for ``parameter`` are: ``server_version``,
+ ``server_encoding``, ``client_encoding``, ``is_superuser``,
+ ``session_authorization``, ``DateStyle``, ``TimeZone``,
+ ``integer_datetimes``, and ``standard_conforming_strings``.
+
+ If the server did not report the requested parameter, return `!None`.
+
+ .. seealso:: libpq docs for `PQparameterStatus()`__ for details.
+
+ .. __: https://www.postgresql.org/docs/current/static/libpq-status.html#LIBPQ-PQPARAMETERSTATUS
+
+ .. versionadded:: 2.0.12
+
+
+ .. index::
+ pair: Connection; Parameters
+
+ .. method:: get_dsn_parameters()
+
+ Also available as `~connection.info`\ `!.`\
+ `~psycopg2.extensions.ConnectionInfo.dsn_parameters`.
+
+ Get the effective dsn parameters for the connection as a dictionary.
+
+ The *password* parameter is removed from the result.
+
+ Example::
+
+ >>> conn.get_dsn_parameters()
+ {'dbname': 'test', 'user': 'postgres', 'port': '5432', 'sslmode': 'prefer'}
+
+ Requires libpq >= 9.3.
+
+ .. seealso:: libpq docs for `PQconninfo()`__ for details.
+
+ .. __: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNINFO
+
+ .. versionadded:: 2.7
+
+
+.. testcode::
+ :hide:
+
+ conn.rollback()
diff --git a/doc/src/cursor.rst b/doc/src/cursor.rst
new file mode 100644
index 0000000..b3e3b42
--- /dev/null
+++ b/doc/src/cursor.rst
@@ -0,0 +1,678 @@
+The ``cursor`` class
+====================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+.. testsetup:: *
+
+ from StringIO import StringIO
+ import sys
+
+ create_test_table()
+
+ # initial data
+ cur.executemany("INSERT INTO test (num, data) VALUES (%s, %s)",
+ [(100, "abc'def"), (None, 'dada'), (42, 'bar')])
+ conn.commit()
+
+
+.. class:: cursor
+
+ Allows Python code to execute PostgreSQL command in a database session.
+ Cursors are created by the `connection.cursor()` method: they are
+ bound to the connection for the entire lifetime and all the commands are
+ executed in the context of the database session wrapped by the connection.
+
+ Cursors created from the same connection are not isolated, i.e., any
+ changes done to the database by a cursor are immediately visible by the
+ other cursors. Cursors created from different connections can or can not
+ be isolated, depending on the connections' :ref:`isolation level
+ <transactions-control>`. See also `~connection.rollback()` and
+ `~connection.commit()` methods.
+
+ Cursors are *not* thread safe: a multithread application can create
+ many cursors from the same connection and should use each cursor from
+ a single thread. See :ref:`thread-safety` for details.
+
+ Cursors can be used as context managers: leaving the context will close
+ the cursor.
+
+ .. code:: python
+
+ with conn.cursor() as curs:
+ curs.execute(SQL)
+
+ # the cursor is now closed
+
+
+ .. attribute:: description
+
+ Read-only attribute describing the result of a query. It is a
+ sequence of `~psycopg2.extensions.Column` instances, each one
+ describing one result column in order. The attribute is `!None` for
+ operations that do not return rows or if the cursor has not had an
+ operation invoked via the |execute*|_ methods yet.
+
+ For compatibility with the DB-API, every object can be unpacked as a
+ 7-items sequence: the attributes returned this way are the following.
+ For further details and other attributes available check the
+ `~psycopg2.extensions.Column` documentation.
+
+ 0. `~psycopg2.extensions.Column.name`: the name of the column returned.
+
+ 1. `~psycopg2.extensions.Column.type_code`: the PostgreSQL OID of the
+ column.
+
+ 2. `~psycopg2.extensions.Column.display_size`: the actual length of
+ the column in bytes.
+
+ 3. `~psycopg2.extensions.Column.internal_size`: the size in bytes of
+ the column associated to this column on the server.
+
+ 4. `~psycopg2.extensions.Column.precision`: total number of
+ significant digits in columns of type |NUMERIC|. `!None`
+ for other types.
+
+ 5. `~psycopg2.extensions.Column.scale`: count of decimal digits in
+ the fractional part in columns of type |NUMERIC|. `!None`
+ for other types.
+
+ 6. `~psycopg2.extensions.Column.null_ok`: always `!None` as not easy
+ to retrieve from the libpq.
+
+ .. versionchanged:: 2.4
+ if possible, columns descriptions are named tuple instead of
+ regular tuples.
+
+ .. versionchanged:: 2.8
+ columns descriptions are instances of `!Column`, exposing extra
+ attributes.
+
+ .. |NUMERIC| replace:: :sql:`NUMERIC`
+
+ .. method:: close()
+
+ Close the cursor now (rather than whenever `del` is executed).
+ The cursor will be unusable from this point forward; an
+ `~psycopg2.InterfaceError` will be raised if any operation is
+ attempted with the cursor.
+
+ .. versionchanged:: 2.5 if the cursor is used in a ``with`` statement,
+ the method is automatically called at the end of the ``with``
+ block.
+
+
+ .. attribute:: closed
+
+ Read-only boolean attribute: specifies if the cursor is closed
+ (`!True`) or not (`!False`).
+
+ .. extension::
+
+ The `closed` attribute is a Psycopg extension to the
+ |DBAPI|.
+
+ .. versionadded:: 2.0.7
+
+
+ .. attribute:: connection
+
+ Read-only attribute returning a reference to the `connection`
+ object on which the cursor was created.
+
+
+ .. attribute:: name
+
+ Read-only attribute containing the name of the cursor if it was
+ created as named cursor by `connection.cursor()`, or `!None` if
+ it is a client side cursor. See :ref:`server-side-cursors`.
+
+ .. extension::
+
+ The `name` attribute is a Psycopg extension to the |DBAPI|.
+
+
+ .. attribute:: scrollable
+
+ Read/write attribute: specifies if a named cursor is declared
+ :sql:`SCROLL`, hence is capable of scrolling backwards (using
+ `~cursor.scroll()`). If `!True`, the cursor can be scrolled backwards,
+ if `!False` it is never scrollable. If `!None` (default) the cursor
+ scroll option is not specified, usually but not always meaning no
+ backward scroll (see the |declare-notes|__).
+
+ .. |declare-notes| replace:: :sql:`DECLARE` notes
+ .. __: https://www.postgresql.org/docs/current/static/sql-declare.html#SQL-DECLARE-NOTES
+
+ .. note::
+
+ set the value before calling `~cursor.execute()` or use the
+ `connection.cursor()` *scrollable* parameter, otherwise the value
+ will have no effect.
+
+ .. versionadded:: 2.5
+
+ .. extension::
+
+ The `scrollable` attribute is a Psycopg extension to the |DBAPI|.
+
+
+ .. attribute:: withhold
+
+ Read/write attribute: specifies if a named cursor lifetime should
+ extend outside of the current transaction, i.e., it is possible to
+ fetch from the cursor even after a `connection.commit()` (but not after
+ a `connection.rollback()`). See :ref:`server-side-cursors`
+
+ .. note::
+
+ set the value before calling `~cursor.execute()` or use the
+ `connection.cursor()` *withhold* parameter, otherwise the value
+ will have no effect.
+
+ .. versionadded:: 2.4.3
+
+ .. extension::
+
+ The `withhold` attribute is a Psycopg extension to the |DBAPI|.
+
+
+ .. |execute*| replace:: `execute*()`
+
+ .. _execute*:
+
+ .. rubric:: Commands execution methods
+
+
+ .. method:: execute(query, vars=None)
+
+ Execute a database operation (query or command).
+
+ Parameters may be provided as sequence or mapping and will be bound to
+ variables in the operation. Variables are specified either with
+ positional (``%s``) or named (:samp:`%({name})s`) placeholders. See
+ :ref:`query-parameters`.
+
+ The method returns `!None`. If a query was executed, the returned
+ values can be retrieved using |fetch*|_ methods.
+
+
+ .. method:: executemany(query, vars_list)
+
+ Execute a database operation (query or command) against all parameter
+ tuples or mappings found in the sequence *vars_list*.
+
+ The function is mostly useful for commands that update the database:
+ any result set returned by the query is discarded.
+
+ Parameters are bound to the query using the same rules described in
+ the `~cursor.execute()` method.
+
+ .. warning::
+ In its current implementation this method is not faster than
+ executing `~cursor.execute()` in a loop. For better performance
+ you can use the functions described in :ref:`fast-exec`.
+
+
+ .. method:: callproc(procname [, parameters])
+
+ Call a stored database procedure with the given name. The sequence of
+ parameters must contain one entry for each argument that the procedure
+ expects. Overloaded procedures are supported. Named parameters can be
+ used by supplying the parameters as a dictionary.
+
+ This function is, at present, not DBAPI-compliant. The return value is
+ supposed to consist of the sequence of parameters with modified output
+ and input/output parameters. In future versions, the DBAPI-compliant
+ return value may be implemented, but for now the function returns None.
+
+ The procedure may provide a result set as output. This is then made
+ available through the standard |fetch*|_ methods.
+
+ .. versionchanged:: 2.7
+ added support for named arguments.
+
+ .. note::
+
+ `!callproc()` can only be used with PostgreSQL functions__, not
+ with the procedures__ introduced in PostgreSQL 11, which require
+ the :sql:`CALL` statement to run. Please use a normal
+ `execute()` to run them.
+
+ .. __: https://www.postgresql.org/docs/current/sql-createfunction.html
+ .. __: https://www.postgresql.org/docs/current/sql-createprocedure.html
+
+ .. method:: mogrify(operation [, parameters])
+
+ Return a query string after arguments binding. The string returned is
+ exactly the one that would be sent to the database running the
+ `~cursor.execute()` method or similar.
+
+ The returned string is always a bytes string.
+
+ >>> cur.mogrify("INSERT INTO test (num, data) VALUES (%s, %s)", (42, 'bar'))
+ "INSERT INTO test (num, data) VALUES (42, E'bar')"
+
+ .. extension::
+
+ The `mogrify()` method is a Psycopg extension to the |DBAPI|.
+
+ .. method:: setinputsizes(sizes)
+
+ This method is exposed in compliance with the |DBAPI|. It currently
+ does nothing but it is safe to call it.
+
+
+
+ .. |fetch*| replace:: `!fetch*()`
+
+ .. _fetch*:
+
+ .. rubric:: Results retrieval methods
+
+
+ The following methods are used to read data from the database after an
+ `~cursor.execute()` call.
+
+ .. _cursor-iterable:
+
+ .. note::
+
+ `cursor` objects are iterable, so, instead of calling
+ explicitly `~cursor.fetchone()` in a loop, the object itself can
+ be used:
+
+ >>> cur.execute("SELECT * FROM test;")
+ >>> for record in cur:
+ ... print record
+ ...
+ (1, 100, "abc'def")
+ (2, None, 'dada')
+ (3, 42, 'bar')
+
+ .. versionchanged:: 2.4
+ iterating over a :ref:`named cursor <server-side-cursors>`
+ fetches `~cursor.itersize` records at time from the backend.
+ Previously only one record was fetched per roundtrip, resulting
+ in a large overhead.
+
+ .. method:: fetchone()
+
+ Fetch the next row of a query result set, returning a single tuple,
+ or `!None` when no more data is available:
+
+ >>> cur.execute("SELECT * FROM test WHERE id = %s", (3,))
+ >>> cur.fetchone()
+ (3, 42, 'bar')
+
+ A `~psycopg2.ProgrammingError` is raised if the previous call
+ to |execute*|_ did not produce any result set or no call was issued
+ yet.
+
+
+ .. method:: fetchmany([size=cursor.arraysize])
+
+ Fetch the next set of rows of a query result, returning a list of
+ tuples. An empty list is returned when no more rows are available.
+
+ The number of rows to fetch per call is specified by the parameter.
+ If it is not given, the cursor's `~cursor.arraysize` determines
+ the number of rows to be fetched. The method should try to fetch as
+ many rows as indicated by the size parameter. If this is not possible
+ due to the specified number of rows not being available, fewer rows
+ may be returned:
+
+ >>> cur.execute("SELECT * FROM test;")
+ >>> cur.fetchmany(2)
+ [(1, 100, "abc'def"), (2, None, 'dada')]
+ >>> cur.fetchmany(2)
+ [(3, 42, 'bar')]
+ >>> cur.fetchmany(2)
+ []
+
+ A `~psycopg2.ProgrammingError` is raised if the previous call to
+ |execute*|_ did not produce any result set or no call was issued yet.
+
+ Note there are performance considerations involved with the size
+ parameter. For optimal performance, it is usually best to use the
+ `~cursor.arraysize` attribute. If the size parameter is used,
+ then it is best for it to retain the same value from one
+ `fetchmany()` call to the next.
+
+
+ .. method:: fetchall()
+
+ Fetch all (remaining) rows of a query result, returning them as a list
+ of tuples. An empty list is returned if there is no more record to
+ fetch.
+
+ >>> cur.execute("SELECT * FROM test;")
+ >>> cur.fetchall()
+ [(1, 100, "abc'def"), (2, None, 'dada'), (3, 42, 'bar')]
+
+ A `~psycopg2.ProgrammingError` is raised if the previous call to
+ |execute*|_ did not produce any result set or no call was issued yet.
+
+
+ .. method:: scroll(value [, mode='relative'])
+
+ Scroll the cursor in the result set to a new position according
+ to mode.
+
+ If `mode` is ``relative`` (default), value is taken as offset to
+ the current position in the result set, if set to ``absolute``,
+ value states an absolute target position.
+
+ If the scroll operation would leave the result set, a
+ `~psycopg2.ProgrammingError` is raised and the cursor position is
+ not changed.
+
+ .. note::
+
+ According to the |DBAPI|_, the exception raised for a cursor out
+ of bound should have been `!IndexError`. The best option is
+ probably to catch both exceptions in your code::
+
+ try:
+ cur.scroll(1000 * 1000)
+ except (ProgrammingError, IndexError) as exc:
+ deal_with_it(exc)
+
+ The method can be used both for client-side cursors and
+ :ref:`server-side cursors <server-side-cursors>`. Server-side cursors
+ can usually scroll backwards only if declared `~cursor.scrollable`.
+ Moving out-of-bound in a server-side cursor doesn't result in an
+ exception, if the backend doesn't raise any (Postgres doesn't tell us
+ in a reliable way if we went out of bound).
+
+
+ .. attribute:: arraysize
+
+ This read/write attribute specifies the number of rows to fetch at a
+ time with `~cursor.fetchmany()`. It defaults to 1 meaning to fetch
+ a single row at a time.
+
+
+ .. attribute:: itersize
+
+ Read/write attribute specifying the number of rows to fetch from the
+ backend at each network roundtrip during :ref:`iteration
+ <cursor-iterable>` on a :ref:`named cursor <server-side-cursors>`. The
+ default is 2000.
+
+ .. versionadded:: 2.4
+
+ .. extension::
+
+ The `itersize` attribute is a Psycopg extension to the |DBAPI|.
+
+
+ .. attribute:: rowcount
+
+ This read-only attribute specifies the number of rows that the last
+ |execute*|_ produced (for :abbr:`DQL (Data Query Language)` statements
+ like :sql:`SELECT`) or affected (for
+ :abbr:`DML (Data Manipulation Language)` statements like :sql:`UPDATE`
+ or :sql:`INSERT`).
+
+ The attribute is -1 in case no |execute*| has been performed on
+ the cursor or the row count of the last operation if it can't be
+ determined by the interface.
+
+ .. note::
+ The |DBAPI|_ interface reserves to redefine the latter case to
+ have the object return `!None` instead of -1 in future versions
+ of the specification.
+
+
+ .. attribute:: rownumber
+
+ This read-only attribute provides the current 0-based index of the
+ cursor in the result set or `!None` if the index cannot be
+ determined.
+
+ The index can be seen as index of the cursor in a sequence (the result
+ set). The next fetch operation will fetch the row indexed by
+ `rownumber` in that sequence.
+
+
+ .. index:: oid
+
+ .. attribute:: lastrowid
+
+ This read-only attribute provides the OID of the last row inserted
+ by the cursor. If the table wasn't created with OID support or the
+ last operation is not a single record insert, the attribute is set to
+ `!None`.
+
+ .. note::
+
+ PostgreSQL currently advises not to create OIDs on the tables and
+ the default for |CREATE-TABLE|__ is to not support them. The
+ |INSERT-RETURNING|__ syntax available from PostgreSQL 8.3 allows
+ more flexibility.
+
+ .. |CREATE-TABLE| replace:: :sql:`CREATE TABLE`
+ .. __: https://www.postgresql.org/docs/current/static/sql-createtable.html
+
+ .. |INSERT-RETURNING| replace:: :sql:`INSERT ... RETURNING`
+ .. __: https://www.postgresql.org/docs/current/static/sql-insert.html
+
+
+ .. attribute:: query
+
+ Read-only attribute containing the body of the last query sent to the
+ backend (including bound arguments) as bytes string. `!None` if no
+ query has been executed yet:
+
+ >>> cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)", (42, 'bar'))
+ >>> cur.query
+ "INSERT INTO test (num, data) VALUES (42, E'bar')"
+
+ .. extension::
+
+ The `query` attribute is a Psycopg extension to the |DBAPI|.
+
+
+ .. attribute:: statusmessage
+
+ Read-only attribute containing the message returned by the last
+ command:
+
+ >>> cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)", (42, 'bar'))
+ >>> cur.statusmessage
+ 'INSERT 0 1'
+
+ .. extension::
+
+ The `statusmessage` attribute is a Psycopg extension to the
+ |DBAPI|.
+
+
+ .. method:: cast(oid, s)
+
+ Convert a value from the PostgreSQL string representation to a Python
+ object.
+
+ Use the most specific of the typecasters registered by
+ `~psycopg2.extensions.register_type()`.
+
+ .. versionadded:: 2.4
+
+ .. extension::
+
+ The `cast()` method is a Psycopg extension to the |DBAPI|.
+
+
+ .. attribute:: tzinfo_factory
+
+ The time zone factory used to handle data types such as
+ :sql:`TIMESTAMP WITH TIME ZONE`. It should be a `~datetime.tzinfo`
+ object. Default is `datetime.timezone`.
+
+ .. versionchanged:: 2.9
+ previously the default factory was `psycopg2.tz.FixedOffsetTimezone`.
+
+
+ .. method:: nextset()
+
+ This method is not supported (PostgreSQL does not have multiple data
+ sets) and will raise a `~psycopg2.NotSupportedError` exception.
+
+
+ .. method:: setoutputsize(size [, column])
+
+ This method is exposed in compliance with the |DBAPI|. It currently
+ does nothing but it is safe to call it.
+
+
+
+ .. rubric:: COPY-related methods
+
+ Efficiently copy data from file-like objects to the database and back. See
+ :ref:`copy` for an overview.
+
+ .. extension::
+
+ The :sql:`COPY` command is a PostgreSQL extension to the SQL standard.
+ As such, its support is a Psycopg extension to the |DBAPI|.
+
+ .. method:: copy_from(file, table, sep='\\t', null='\\\\N', size=8192, columns=None)
+
+ Read data *from* the file-like object *file* appending them to
+ the table named *table*.
+
+ :param file: file-like object to read data from. It must have both
+ `!read()` and `!readline()` methods.
+ :param table: name of the table to copy data into.
+ :param sep: columns separator expected in the file. Defaults to a tab.
+ :param null: textual representation of :sql:`NULL` in the file.
+ The default is the two characters string ``\N``.
+ :param size: size of the buffer used to read from the file.
+ :param columns: iterable with name of the columns to import.
+ The length and types should match the content of the file to read.
+ If not specified, it is assumed that the entire table matches the
+ file structure.
+
+ Example::
+
+ >>> f = StringIO("42\tfoo\n74\tbar\n")
+ >>> cur.copy_from(f, 'test', columns=('num', 'data'))
+ >>> cur.execute("select * from test where id > 5;")
+ >>> cur.fetchall()
+ [(6, 42, 'foo'), (7, 74, 'bar')]
+
+ .. note:: the name of the table is not quoted: if the table name
+ contains uppercase letters or special characters it must be quoted
+ with double quotes::
+
+ cur.copy_from(f, '"TABLE"')
+
+
+ .. versionchanged:: 2.0.6
+ added the *columns* parameter.
+
+ .. versionchanged:: 2.4
+ data read from files implementing the `io.TextIOBase` interface
+ are encoded in the connection `~connection.encoding` when sent to
+ the backend.
+
+ .. method:: copy_to(file, table, sep='\\t', null='\\\\N', columns=None)
+
+ Write the content of the table named *table* *to* the file-like
+ object *file*. See :ref:`copy` for an overview.
+
+ :param file: file-like object to write data into. It must have a
+ `!write()` method.
+ :param table: name of the table to copy data from.
+ :param sep: columns separator expected in the file. Defaults to a tab.
+ :param null: textual representation of :sql:`NULL` in the file.
+ The default is the two characters string ``\N``.
+ :param columns: iterable with name of the columns to export.
+ If not specified, export all the columns.
+
+ Example::
+
+ >>> cur.copy_to(sys.stdout, 'test', sep="|")
+ 1|100|abc'def
+ 2|\N|dada
+ ...
+
+ .. note:: the name of the table is not quoted: if the table name
+ contains uppercase letters or special characters it must be quoted
+ with double quotes::
+
+ cur.copy_to(f, '"TABLE"')
+
+ .. versionchanged:: 2.0.6
+ added the *columns* parameter.
+
+ .. versionchanged:: 2.4
+ data sent to files implementing the `io.TextIOBase` interface
+ are decoded in the connection `~connection.encoding` when read
+ from the backend.
+
+
+ .. method:: copy_expert(sql, file, size=8192)
+
+ Submit a user-composed :sql:`COPY` statement. The method is useful to
+ handle all the parameters that PostgreSQL makes available (see
+ |COPY|__ command documentation).
+
+ :param sql: the :sql:`COPY` statement to execute.
+ :param file: a file-like object to read or write (according to *sql*).
+ :param size: size of the read buffer to be used in :sql:`COPY FROM`.
+
+ The *sql* statement should be in the form :samp:`COPY {table} TO
+ STDOUT` to export :samp:`{table}` to the *file* object passed as
+ argument or :samp:`COPY {table} FROM STDIN` to import the content of
+ the *file* object into :samp:`{table}`. If you need to compose a
+ :sql:`COPY` statement dynamically (because table, fields, or query
+ parameters are in Python variables) you may use the objects provided
+ by the `psycopg2.sql` module.
+
+ *file* must be a readable file-like object (as required by
+ `~cursor.copy_from()`) for *sql* statement :sql:`COPY ... FROM STDIN`
+ or a writable one (as required by `~cursor.copy_to()`) for :sql:`COPY
+ ... TO STDOUT`.
+
+ Example:
+
+ >>> cur.copy_expert("COPY test TO STDOUT WITH CSV HEADER", sys.stdout)
+ id,num,data
+ 1,100,abc'def
+ 2,,dada
+ ...
+
+ .. |COPY| replace:: :sql:`COPY`
+ .. __: https://www.postgresql.org/docs/current/static/sql-copy.html
+
+ .. versionadded:: 2.0.6
+
+ .. versionchanged:: 2.4
+ files implementing the `io.TextIOBase` interface are dealt with
+ using Unicode data instead of bytes.
+
+
+ .. rubric:: Interoperation with other C API modules
+
+ .. attribute:: pgresult_ptr
+
+ Return the cursor's internal `!PGresult*` as integer. Useful to pass
+ the libpq raw result structure to C functions, e.g. via `ctypes`::
+
+ >>> import ctypes
+ >>> libpq = ctypes.pydll.LoadLibrary(ctypes.util.find_library('pq'))
+ >>> libpq.PQcmdStatus.argtypes = [ctypes.c_void_p]
+ >>> libpq.PQcmdStatus.restype = ctypes.c_char_p
+
+ >>> curs.execute("select 'x'")
+ >>> libpq.PQcmdStatus(curs.pgresult_ptr)
+ b'SELECT 1'
+
+ .. versionadded:: 2.8
+
+.. testcode::
+ :hide:
+
+ conn.rollback()
diff --git a/doc/src/errorcodes.rst b/doc/src/errorcodes.rst
new file mode 100644
index 0000000..2966efa
--- /dev/null
+++ b/doc/src/errorcodes.rst
@@ -0,0 +1,76 @@
+`psycopg2.errorcodes` -- Error codes defined by PostgreSQL
+===============================================================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+.. index::
+ single: Error; Codes
+
+.. module:: psycopg2.errorcodes
+
+.. testsetup:: *
+
+ from psycopg2 import errorcodes
+
+.. versionadded:: 2.0.6
+
+This module contains symbolic names for all PostgreSQL error codes and error
+classes codes. Subclasses of `~psycopg2.Error` make the PostgreSQL error
+code available in the `~psycopg2.Error.pgcode` attribute.
+
+From PostgreSQL documentation:
+
+ All messages emitted by the PostgreSQL server are assigned five-character
+ error codes that follow the SQL standard's conventions for :sql:`SQLSTATE`
+ codes. Applications that need to know which error condition has occurred
+ should usually test the error code, rather than looking at the textual
+ error message. The error codes are less likely to change across
+ PostgreSQL releases, and also are not subject to change due to
+ localization of error messages. Note that some, but not all, of the error
+ codes produced by PostgreSQL are defined by the SQL standard; some
+ additional error codes for conditions not defined by the standard have
+ been invented or borrowed from other databases.
+
+ According to the standard, the first two characters of an error code
+ denote a class of errors, while the last three characters indicate a
+ specific condition within that class. Thus, an application that does not
+ recognize the specific error code can still be able to infer what to do
+ from the error class.
+
+.. seealso:: `PostgreSQL Error Codes table`__
+
+ .. __: https://www.postgresql.org/docs/current/static/errcodes-appendix.html#ERRCODES-TABLE
+
+
+An example of the available constants defined in the module:
+
+ >>> errorcodes.CLASS_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION
+ '42'
+ >>> errorcodes.UNDEFINED_TABLE
+ '42P01'
+
+Constants representing all the error values defined by PostgreSQL versions
+between 8.1 and 13 are included in the module.
+
+
+.. autofunction:: lookup(code)
+
+ .. doctest::
+
+ >>> try:
+ ... cur.execute("SELECT ouch FROM aargh;")
+ ... except Exception as e:
+ ... pass
+ ...
+ >>> errorcodes.lookup(e.pgcode[:2])
+ 'CLASS_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION'
+ >>> errorcodes.lookup(e.pgcode)
+ 'UNDEFINED_TABLE'
+
+ .. versionadded:: 2.0.14
+
+
+.. testcode::
+ :hide:
+
+ conn.rollback()
diff --git a/doc/src/errors.rst b/doc/src/errors.rst
new file mode 100644
index 0000000..d1aed13
--- /dev/null
+++ b/doc/src/errors.rst
@@ -0,0 +1,83 @@
+`psycopg2.errors` -- Exception classes mapping PostgreSQL errors
+================================================================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+.. index::
+ single: Error; Class
+
+.. module:: psycopg2.errors
+
+.. versionadded:: 2.8
+
+.. versionchanged:: 2.8.4 added errors introduced in PostgreSQL 12
+
+.. versionchanged:: 2.8.6 added errors introduced in PostgreSQL 13
+
+This module exposes the classes psycopg raises upon receiving an error from
+the database with a :sql:`SQLSTATE` value attached (available in the
+`~psycopg2.Error.pgcode` attribute). The content of the module is generated
+from the PostgreSQL source code and includes classes for every error defined
+by PostgreSQL in versions between 9.1 and 13.
+
+Every class in the module is named after what is referred to as "condition
+name" `in the documentation`__, converted to CamelCase: e.g. the error 22012,
+``division_by_zero`` is exposed by this module as the class `!DivisionByZero`.
+
+.. __: https://www.postgresql.org/docs/current/static/errcodes-appendix.html#ERRCODES-TABLE
+
+Every exception class is a subclass of one of the :ref:`standard DB-API
+exceptions <dbapi-exceptions>` and exposes the `~psycopg2.Error` interface.
+Each class' superclass is what used to be raised by psycopg in versions before
+the introduction of this module, so everything should be compatible with
+previously written code catching one of the DB-API classes: if your code used
+to catch `!IntegrityError` to detect a duplicate entry, it will keep on
+working even if a more specialised subclass such as `UniqueViolation` is raised.
+
+The new classes allow a more idiomatic way to check and process a specific
+error among the many the database may return. For instance, in order to check
+that a table is locked, the following code could have been used previously:
+
+.. code-block:: python
+
+ try:
+ cur.execute("LOCK TABLE mytable IN ACCESS EXCLUSIVE MODE NOWAIT")
+ except psycopg2.OperationalError as e:
+ if e.pgcode == psycopg2.errorcodes.LOCK_NOT_AVAILABLE:
+ locked = True
+ else:
+ raise
+
+While this method is still available, the specialised class allows for a more
+idiomatic error handler:
+
+.. code-block:: python
+
+ try:
+ cur.execute("LOCK TABLE mytable IN ACCESS EXCLUSIVE MODE NOWAIT")
+ except psycopg2.errors.LockNotAvailable:
+ locked = True
+
+
+.. autofunction:: lookup
+
+ .. code-block:: python
+
+ try:
+ cur.execute("LOCK TABLE mytable IN ACCESS EXCLUSIVE MODE NOWAIT")
+ except psycopg2.errors.lookup("55P03"):
+ locked = True
+
+
+SQLSTATE exception classes
+--------------------------
+
+The following table contains the list of all the SQLSTATE classes exposed by
+the module.
+
+Note that, for completeness, the module also exposes all the
+:ref:`DB-API-defined exceptions <dbapi-exceptions>` and :ref:`a few
+psycopg-specific ones <extension-exceptions>` exposed by the `!extensions`
+module, which are not listed here.
+
+.. include:: sqlstate_errors.rst
diff --git a/doc/src/extensions.rst b/doc/src/extensions.rst
new file mode 100644
index 0000000..763910d
--- /dev/null
+++ b/doc/src/extensions.rst
@@ -0,0 +1,1010 @@
+`psycopg2.extensions` -- Extensions to the DB API
+======================================================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+.. module:: psycopg2.extensions
+
+.. testsetup:: *
+
+ from psycopg2.extensions import AsIs, Binary, QuotedString, ISOLATION_LEVEL_AUTOCOMMIT
+
+The module contains a few objects and function extending the minimum set of
+functionalities defined by the |DBAPI|_.
+
+Classes definitions
+-------------------
+
+Instances of these classes are usually returned by factory functions or
+attributes. Their definitions are exposed here to allow subclassing,
+introspection etc.
+
+.. class:: connection(dsn, async=False)
+
+    It is the class usually returned by the `~psycopg2.connect()` function.
+ It is exposed by the `extensions` module in order to allow
+ subclassing to extend its behaviour: the subclass should be passed to the
+ `!connect()` function using the `connection_factory` parameter.
+ See also :ref:`subclassing-connection`.
+
+ For a complete description of the class, see `connection`.
+
+ .. versionchanged:: 2.7
+ *async_* can be used as alias for *async*.
+
+.. class:: cursor(conn, name=None)
+
+ It is the class usually returned by the `connection.cursor()`
+ method. It is exposed by the `extensions` module in order to allow
+ subclassing to extend its behaviour: the subclass should be passed to the
+ `!cursor()` method using the `cursor_factory` parameter. See
+ also :ref:`subclassing-cursor`.
+
+ For a complete description of the class, see `cursor`.
+
+
+.. class:: lobject(conn [, oid [, mode [, new_oid [, new_file ]]]])
+
+ Wrapper for a PostgreSQL large object. See :ref:`large-objects` for an
+ overview.
+
+ The class can be subclassed: see the `connection.lobject()` to know
+ how to specify a `!lobject` subclass.
+
+ .. versionadded:: 2.0.8
+
+ .. attribute:: oid
+
+ Database OID of the object.
+
+
+ .. attribute:: mode
+
+        The mode in which the large object was opened. See
+        `connection.lobject()` for a description of the available modes.
+
+
+ .. method:: read(bytes=-1)
+
+ Read a chunk of data from the current file position. If -1 (default)
+ read all the remaining data.
+
+        The result is a Unicode string (decoded according to
+ `connection.encoding`) if the file was open in ``t`` mode, a bytes
+ string for ``b`` mode.
+
+ .. versionchanged:: 2.4
+ added Unicode support.
+
+
+ .. method:: write(str)
+
+ Write a string to the large object. Return the number of bytes
+ written. Unicode strings are encoded in the `connection.encoding`
+ before writing.
+
+ .. versionchanged:: 2.4
+ added Unicode support.
+
+
+ .. method:: export(file_name)
+
+ Export the large object content to the file system.
+
+ The method uses the efficient |lo_export|_ libpq function.
+
+ .. |lo_export| replace:: `!lo_export()`
+ .. _lo_export: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-EXPORT
+
+
+ .. method:: seek(offset, whence=0)
+
+ Set the lobject current position.
+
+ .. versionchanged:: 2.6
+ added support for *offset* > 2GB.
+
+
+ .. method:: tell()
+
+ Return the lobject current position.
+
+ .. versionadded:: 2.2
+
+ .. versionchanged:: 2.6
+ added support for return value > 2GB.
+
+
+ .. method:: truncate(len=0)
+
+ Truncate the lobject to the given size.
+
+ The method will only be available if Psycopg has been built against
+ libpq from PostgreSQL 8.3 or later and can only be used with
+ PostgreSQL servers running these versions. It uses the |lo_truncate|_
+ libpq function.
+
+ .. |lo_truncate| replace:: `!lo_truncate()`
+ .. _lo_truncate: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-TRUNCATE
+
+ .. versionadded:: 2.2
+
+ .. versionchanged:: 2.6
+ added support for *len* > 2GB.
+
+ .. warning::
+
+ If Psycopg is built with |lo_truncate| support or with the 64 bits API
+ support (resp. from PostgreSQL versions 8.3 and 9.3) but at runtime an
+ older version of the dynamic library is found, the ``psycopg2`` module
+ will fail to import. See :ref:`the lo_truncate FAQ <faq-lo_truncate>`
+ about the problem.
+
+
+ .. method:: close()
+
+ Close the object.
+
+ .. attribute:: closed
+
+ Boolean attribute specifying if the object is closed.
+
+ .. method:: unlink()
+
+ Close the object and remove it from the database.
+
+
+
+.. autoclass:: ConnectionInfo(connection)
+
+ .. versionadded:: 2.8
+
+ .. autoattribute:: dbname
+ .. autoattribute:: user
+ .. autoattribute:: password
+ .. autoattribute:: host
+ .. autoattribute:: port
+ .. autoattribute:: options
+ .. autoattribute:: dsn_parameters
+
+ Example::
+
+ >>> conn.info.dsn_parameters
+ {'dbname': 'test', 'user': 'postgres', 'port': '5432', 'sslmode': 'prefer'}
+
+ Requires libpq >= 9.3.
+
+ .. autoattribute:: status
+ .. autoattribute:: transaction_status
+ .. automethod:: parameter_status(name)
+
+ .. autoattribute:: protocol_version
+
+ Currently Psycopg supports only protocol 3, which allows connection
+ to PostgreSQL server from version 7.4. Psycopg versions previous than
+ 2.3 support both protocols 2 and 3.
+
+ .. autoattribute:: server_version
+
+ The number is formed by converting the major, minor, and revision
+ numbers into two-decimal-digit numbers and appending them together.
+ After PostgreSQL 10 the minor version was dropped, so the second group
+ of digits is always ``00``. For example, version 9.3.5 will be
+ returned as ``90305``, version 10.2 as ``100002``.
+
+ .. autoattribute:: error_message
+ .. autoattribute:: socket
+ .. autoattribute:: backend_pid
+ .. autoattribute:: needs_password
+ .. autoattribute:: used_password
+ .. autoattribute:: ssl_in_use
+ .. automethod:: ssl_attribute(name)
+ .. autoattribute:: ssl_attribute_names
+
+
+.. class:: Column(\*args, \*\*kwargs)
+
+ Description of one result column, exposed as items of the
+ `cursor.description` sequence.
+
+ .. versionadded:: 2.8
+
+        in previous versions the `!description` attribute was a sequence of
+        simple tuples or namedtuples.
+
+ .. attribute:: name
+
+ The name of the column returned.
+
+ .. attribute:: type_code
+
+        The PostgreSQL OID of the column. You can use the |pg_type|_ system
+        table to get more information about the type. This is the value used
+        by Psycopg to decide which Python type to use to represent the value.
+        See also :ref:`type-casting-from-sql-to-python`.
+
+ .. attribute:: display_size
+
+ Supposed to be the actual length of the column in bytes. Obtaining
+ this value is computationally intensive, so it is always `!None`.
+
+ .. versionchanged:: 2.8
+            It was previously possible to obtain this value using a compiler
+            flag at build time.
+
+ .. attribute:: internal_size
+
+        The size in bytes of the column associated to this column on the
+        server. Set to a negative value for variable-size types. See also
+        PQfsize_.
+
+ .. attribute:: precision
+
+ Total number of significant digits in columns of type |NUMERIC|_.
+ `!None` for other types.
+
+ .. attribute:: scale
+
+ Count of decimal digits in the fractional part in columns of type
+ |NUMERIC|. `!None` for other types.
+
+ .. attribute:: null_ok
+
+ Always `!None` as not easy to retrieve from the libpq.
+
+ .. attribute:: table_oid
+
+ The oid of the table from which the column was fetched (matching
+ :sql:`pg_class.oid`). `!None` if the column is not a simple reference
+ to a table column. See also PQftable_.
+
+ .. versionadded:: 2.8
+
+ .. attribute:: table_column
+
+ The number of the column (within its table) making up the result
+ (matching :sql:`pg_attribute.attnum`, so it will start from 1).
+ `!None` if the column is not a simple reference to a table column. See
+ also PQftablecol_.
+
+ .. versionadded:: 2.8
+
+ .. |pg_type| replace:: :sql:`pg_type`
+ .. _pg_type: https://www.postgresql.org/docs/current/static/catalog-pg-type.html
+ .. _PQgetlength: https://www.postgresql.org/docs/current/static/libpq-exec.html#LIBPQ-PQGETLENGTH
+ .. _PQfsize: https://www.postgresql.org/docs/current/static/libpq-exec.html#LIBPQ-PQFSIZE
+ .. _PQftable: https://www.postgresql.org/docs/current/static/libpq-exec.html#LIBPQ-PQFTABLE
+ .. _PQftablecol: https://www.postgresql.org/docs/current/static/libpq-exec.html#LIBPQ-PQFTABLECOL
+ .. _NUMERIC: https://www.postgresql.org/docs/current/static/datatype-numeric.html#DATATYPE-NUMERIC-DECIMAL
+ .. |NUMERIC| replace:: :sql:`NUMERIC`
+
+.. autoclass:: Notify(pid, channel, payload='')
+ :members: pid, channel, payload
+
+ .. versionadded:: 2.3
+
+
+.. autoclass:: Xid(format_id, gtrid, bqual)
+ :members: format_id, gtrid, bqual, prepared, owner, database
+
+ .. versionadded:: 2.3
+
+ .. automethod:: from_string(s)
+
+
+.. autoclass:: Diagnostics(exception)
+
+ .. versionadded:: 2.5
+
+ The attributes currently available are:
+
+ .. attribute::
+ column_name
+ constraint_name
+ context
+ datatype_name
+ internal_position
+ internal_query
+ message_detail
+ message_hint
+ message_primary
+ schema_name
+ severity
+ severity_nonlocalized
+ source_file
+ source_function
+ source_line
+ sqlstate
+ statement_position
+ table_name
+
+ A string with the error field if available; `!None` if not available.
+    The attribute value is available only if the error was sent by the
+    server: not all the fields are available for all the errors and for all
+    the server versions.
+
+ .. versionadded:: 2.8
+ The `!severity_nonlocalized` attribute.
+
+
+
+.. _sql-adaptation-objects:
+
+SQL adaptation protocol objects
+-------------------------------
+
+Psycopg provides a flexible system to adapt Python objects to the SQL syntax
+(inspired by :pep:`246`), allowing serialization in PostgreSQL. See
+:ref:`adapting-new-types` for a detailed description. The following objects
+deal with Python objects adaptation:
+
+.. function:: adapt(obj)
+
+ Return the SQL representation of *obj* as an `ISQLQuote`. Raise a
+ `~psycopg2.ProgrammingError` if how to adapt the object is unknown.
+ In order to allow new objects to be adapted, register a new adapter for it
+ using the `register_adapter()` function.
+
+ The function is the entry point of the adaptation mechanism: it can be
+ used to write adapters for complex objects by recursively calling
+ `!adapt()` on its components.
+
+.. function:: register_adapter(class, adapter)
+
+ Register a new adapter for the objects of class *class*.
+
+ *adapter* should be a function taking a single argument (the object
+ to adapt) and returning an object conforming to the `ISQLQuote`
+ protocol (e.g. exposing a `!getquoted()` method). The `AsIs` is
+ often useful for this task.
+
+ Once an object is registered, it can be safely used in SQL queries and by
+ the `adapt()` function.
+
+.. class:: ISQLQuote(wrapped_object)
+
+    Represents the SQL adaptation protocol. Objects conforming to this
+    protocol should implement a `getquoted()` and optionally a `prepare()`
+    method.
+
+    Adapters may subclass `!ISQLQuote`, but it is not necessary: it is
+ enough to expose a `!getquoted()` method to be conforming.
+
+ .. attribute:: _wrapped
+
+        The wrapped object passed to the constructor
+
+ .. method:: getquoted()
+
+ Subclasses or other conforming objects should return a valid SQL
+ string representing the wrapped object. In Python 3 the SQL must be
+ returned in a `!bytes` object. The `!ISQLQuote` implementation does
+ nothing.
+
+ .. method:: prepare(conn)
+
+ Prepare the adapter for a connection. The method is optional: if
+ implemented, it will be invoked before `!getquoted()` with the
+ connection to adapt for as argument.
+
+        A conforming object can implement this method if the SQL
+ representation depends on any server parameter, such as the server
+ version or the :envvar:`standard_conforming_string` setting. Container
+ objects may store the connection and use it to recursively prepare
+ contained objects: see the implementation for
+ `psycopg2.extensions.SQL_IN` for a simple example.
+
+
+.. class:: AsIs(object)
+
+ Adapter conform to the `ISQLQuote` protocol useful for objects
+ whose string representation is already valid as SQL representation.
+
+ .. method:: getquoted()
+
+ Return the `str()` conversion of the wrapped object.
+
+ >>> AsIs(42).getquoted()
+ '42'
+
+.. class:: QuotedString(str)
+
+ Adapter conform to the `ISQLQuote` protocol for string-like
+ objects.
+
+ .. method:: getquoted()
+
+ Return the string enclosed in single quotes. Any single quote appearing
+ in the string is escaped by doubling it according to SQL string
+ constants syntax. Backslashes are escaped too.
+
+ >>> QuotedString(r"O'Reilly").getquoted()
+ "'O''Reilly'"
+
+.. class:: Binary(str)
+
+ Adapter conform to the `ISQLQuote` protocol for binary objects.
+
+ .. method:: getquoted()
+
+ Return the string enclosed in single quotes. It performs the same
+ escaping of the `QuotedString` adapter, plus it knows how to
+ escape non-printable chars.
+
+ >>> Binary("\x00\x08\x0F").getquoted()
+ "'\\\\000\\\\010\\\\017'"
+
+ .. versionchanged:: 2.0.14
+ previously the adapter was not exposed by the `extensions`
+ module. In older versions it can be imported from the implementation
+ module `!psycopg2._psycopg`.
+
+
+
+.. class:: Boolean
+ Float
+ SQL_IN
+
+ Specialized adapters for builtin objects.
+
+.. class:: DateFromPy
+ TimeFromPy
+ TimestampFromPy
+ IntervalFromPy
+
+ Specialized adapters for Python datetime objects.
+
+.. data:: adapters
+
+ Dictionary of the currently registered object adapters. Use
+ `register_adapter()` to add an adapter for a new type.
+
+
+
+Database types casting functions
+--------------------------------
+
+These functions are used to manipulate type casters to convert from PostgreSQL
+types to Python objects. See :ref:`type-casting-from-sql-to-python` for
+details.
+
+.. function:: new_type(oids, name, adapter)
+
+ Create a new type caster to convert from a PostgreSQL type to a Python
+ object. The object created must be registered using
+ `register_type()` to be used.
+
+ :param oids: tuple of OIDs of the PostgreSQL type to convert.
+ :param name: the name of the new type adapter.
+ :param adapter: the adaptation function.
+
+ The object OID can be read from the `cursor.description` attribute
+ or by querying from the PostgreSQL catalog.
+
+ *adapter* should have signature :samp:`fun({value}, {cur})` where
+ *value* is the string representation returned by PostgreSQL and
+ *cur* is the cursor from which data are read. In case of
+ :sql:`NULL`, *value* will be `!None`. The adapter should return the
+ converted object.
+
+    See :ref:`type-casting-from-sql-to-python` for a usage example.
+
+
+.. function:: new_array_type(oids, name, base_caster)
+
+ Create a new type caster to convert from a PostgreSQL array type to a list
+    of Python objects. The object created must be registered using
+ `register_type()` to be used.
+
+ :param oids: tuple of OIDs of the PostgreSQL type to convert. It should
+ probably contain the oid of the array type (e.g. the ``typarray``
+ field in the ``pg_type`` table).
+ :param name: the name of the new type adapter.
+ :param base_caster: a Psycopg typecaster, e.g. created using the
+ `new_type()` function. The caster should be able to parse a single
+ item of the desired type.
+
+ .. versionadded:: 2.4.3
+
+ .. _cast-array-unknown:
+
+ .. note::
+
+ The function can be used to create a generic array typecaster,
+ returning a list of strings: just use `psycopg2.STRING` as base
+ typecaster. For instance, if you want to receive an array of
+ :sql:`macaddr` from the database, each address represented by string,
+ you can use::
+
+ # select typarray from pg_type where typname = 'macaddr' -> 1040
+ psycopg2.extensions.register_type(
+ psycopg2.extensions.new_array_type(
+ (1040,), 'MACADDR[]', psycopg2.STRING))
+
+
+.. function:: register_type(obj [, scope])
+
+ Register a type caster created using `new_type()`.
+
+ If *scope* is specified, it should be a `connection` or a
+    `cursor`: the type caster will be effective only for the
+    specified object. Otherwise it will be globally registered.
+
+
+.. data:: string_types
+
+ The global register of type casters.
+
+
+.. index::
+ single: Encoding; Mapping
+
+.. data:: encodings
+
+ Mapping from `PostgreSQL encoding`__ to `Python encoding`__ names.
+ Used by Psycopg when adapting or casting unicode strings. See
+ :ref:`unicode-handling`.
+
+ .. __: https://www.postgresql.org/docs/current/static/multibyte.html
+ .. __: https://docs.python.org/library/codecs.html#standard-encodings
+
+
+
+.. index::
+ single: Exceptions; Additional
+
+.. _extension-exceptions:
+
+Additional exceptions
+---------------------
+
+The module exports a few exceptions in addition to the :ref:`standard ones
+<dbapi-exceptions>` defined by the |DBAPI|_.
+
+.. note::
+ From psycopg 2.8 these error classes are also exposed by the
+ `psycopg2.errors` module.
+
+
+.. exception:: QueryCanceledError
+
+ (subclasses `~psycopg2.OperationalError`)
+
+ Error related to SQL query cancellation. It can be trapped specifically to
+ detect a timeout.
+
+ .. versionadded:: 2.0.7
+
+
+.. exception:: TransactionRollbackError
+
+ (subclasses `~psycopg2.OperationalError`)
+
+ Error causing transaction rollback (deadlocks, serialization failures,
+ etc). It can be trapped specifically to detect a deadlock.
+
+ .. versionadded:: 2.0.7
+
+
+
+.. _coroutines-functions:
+
+Coroutines support functions
+----------------------------
+
+These functions are used to set and retrieve the callback function for
+:ref:`cooperation with coroutine libraries <green-support>`.
+
+.. versionadded:: 2.2
+
+.. autofunction:: set_wait_callback(f)
+
+.. autofunction:: get_wait_callback()
+
+
+
+Other functions
+---------------
+
+.. function:: libpq_version()
+
+ Return the version number of the ``libpq`` dynamic library loaded as an
+ integer, in the same format of `~connection.server_version`.
+
+ Raise `~psycopg2.NotSupportedError` if the ``psycopg2`` module was
+    compiled with a ``libpq`` version older than 9.1 (which can be detected
+ by the `~psycopg2.__libpq_version__` constant).
+
+ .. versionadded:: 2.7
+
+ .. seealso:: libpq docs for `PQlibVersion()`__.
+
+ .. __: https://www.postgresql.org/docs/current/static/libpq-misc.html#LIBPQ-PQLIBVERSION
+
+
+.. function:: make_dsn(dsn=None, \*\*kwargs)
+
+ Create a valid connection string from arguments.
+
+ Put together the arguments in *kwargs* into a connection string. If *dsn*
+ is specified too, merge the arguments coming from both the sources. If the
+ same argument name is specified in both the sources, the *kwargs* value
+ overrides the *dsn* value.
+
+ The input arguments are validated: the output should always be a valid
+ connection string (as far as `parse_dsn()` is concerned). If not raise
+ `~psycopg2.ProgrammingError`.
+
+ Example::
+
+ >>> from psycopg2.extensions import make_dsn
+ >>> make_dsn('dbname=foo host=example.com', password="s3cr3t")
+ 'host=example.com password=s3cr3t dbname=foo'
+
+ .. versionadded:: 2.7
+
+
+.. function:: parse_dsn(dsn)
+
+ Parse connection string into a dictionary of keywords and values.
+
+ Parsing is delegated to the libpq: different versions of the client
+ library may support different formats or parameters (for example,
+ `connection URIs`__ are only supported from libpq 9.2). Raise
+ `~psycopg2.ProgrammingError` if the *dsn* is not valid.
+
+ .. __: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
+
+ Example::
+
+ >>> from psycopg2.extensions import parse_dsn
+ >>> parse_dsn('dbname=test user=postgres password=secret')
+ {'password': 'secret', 'user': 'postgres', 'dbname': 'test'}
+ >>> parse_dsn("postgresql://someone@example.com/somedb?connect_timeout=10")
+ {'host': 'example.com', 'user': 'someone', 'dbname': 'somedb', 'connect_timeout': '10'}
+
+ .. versionadded:: 2.7
+
+ .. seealso:: libpq docs for `PQconninfoParse()`__.
+
+ .. __: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNINFOPARSE
+
+
+.. function:: quote_ident(str, scope)
+
+ Return quoted identifier according to PostgreSQL quoting rules.
+
+ The *scope* must be a `connection` or a `cursor`, the underlying
+ connection encoding is used for any necessary character conversion.
+
+ .. versionadded:: 2.7
+
+ .. seealso:: libpq docs for `PQescapeIdentifier()`__
+
+ .. __: https://www.postgresql.org/docs/current/static/libpq-exec.html#LIBPQ-PQESCAPEIDENTIFIER
+
+
+.. method:: encrypt_password(password, user, scope=None, algorithm=None)
+
+ Return the encrypted form of a PostgreSQL password.
+
+ :param password: the cleartext password to encrypt
+ :param user: the name of the user to use the password for
+ :param scope: the scope to encrypt the password into; if *algorithm* is
+ ``md5`` it can be `!None`
+ :type scope: `connection` or `cursor`
+ :param algorithm: the password encryption algorithm to use
+
+ The *algorithm* ``md5`` is always supported. Other algorithms are only
+ supported if the client libpq version is at least 10 and may require a
+ compatible server version: check the `PostgreSQL encryption
+ documentation`__ to know the algorithms supported by your server.
+
+ .. __: https://www.postgresql.org/docs/current/static/encryption-options.html
+
+ Using `!None` as *algorithm* will result in querying the server to know the
+ current server password encryption setting, which is a blocking operation:
+ query the server separately and specify a value for *algorithm* if you
+ want to maintain a non-blocking behaviour.
+
+ .. versionadded:: 2.8
+
+ .. seealso:: PostgreSQL docs for the `password_encryption`__ setting, libpq `PQencryptPasswordConn()`__, `PQencryptPassword()`__ functions.
+
+ .. __: https://www.postgresql.org/docs/current/static/runtime-config-connection.html#GUC-PASSWORD-ENCRYPTION
+ .. __: https://www.postgresql.org/docs/current/static/libpq-misc.html#LIBPQ-PQENCRYPTPASSWORDCONN
+ .. __: https://www.postgresql.org/docs/current/static/libpq-misc.html#LIBPQ-PQENCRYPTPASSWORD
+
+
+
+.. index::
+ pair: Isolation level; Constants
+
+.. _isolation-level-constants:
+
+Isolation level constants
+-------------------------
+
+Psycopg2 `connection` objects hold information about the PostgreSQL
+`transaction isolation level`_. By default Psycopg doesn't change the default
+configuration of the server (`ISOLATION_LEVEL_DEFAULT`); the default for
+PostgreSQL servers is typically :sql:`READ COMMITTED`, but this may be changed
+in the server configuration files. A different isolation level can be set
+through the `~connection.set_isolation_level()` or `~connection.set_session()`
+methods. The level can be set to one of the following constants:
+
+.. data:: ISOLATION_LEVEL_AUTOCOMMIT
+
+ No transaction is started when commands are executed and no
+ `~connection.commit()` or `~connection.rollback()` is required.
+    Some PostgreSQL commands such as :sql:`CREATE DATABASE` or :sql:`VACUUM`
+    can't run in a transaction: to run such a command use::
+
+ >>> conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
+
+ See also :ref:`transactions-control`.
+
+.. data:: ISOLATION_LEVEL_READ_UNCOMMITTED
+
+ The :sql:`READ UNCOMMITTED` isolation level is defined in the SQL standard
+ but not available in the |MVCC| model of PostgreSQL: it is replaced by the
+ stricter :sql:`READ COMMITTED`.
+
+.. data:: ISOLATION_LEVEL_READ_COMMITTED
+
+ This is usually the default PostgreSQL value, but a different default may
+ be set in the database configuration.
+
+ A new transaction is started at the first `~cursor.execute()` command on a
+ cursor and at each new `!execute()` after a `~connection.commit()` or a
+ `~connection.rollback()`. The transaction runs in the PostgreSQL
+ :sql:`READ COMMITTED` isolation level: a :sql:`SELECT` query sees only
+ data committed before the query began; it never sees either uncommitted
+ data or changes committed during query execution by concurrent
+ transactions.
+
+ .. seealso:: `Read Committed Isolation Level`__ in PostgreSQL
+ documentation.
+
+ .. __: https://www.postgresql.org/docs/current/static/transaction-iso.html#XACT-READ-COMMITTED
+
+.. data:: ISOLATION_LEVEL_REPEATABLE_READ
+
+ As in `!ISOLATION_LEVEL_READ_COMMITTED`, a new transaction is started at
+ the first `~cursor.execute()` command. Transactions run at a
+ :sql:`REPEATABLE READ` isolation level: all the queries in a transaction
+ see a snapshot as of the start of the transaction, not as of the start of
+ the current query within the transaction. However applications using this
+ level must be prepared to retry transactions due to serialization
+ failures.
+
+ While this level provides a guarantee that each transaction sees a
+ completely stable view of the database, this view will not necessarily
+ always be consistent with some serial (one at a time) execution of
+ concurrent transactions of the same level.
+
+ .. versionchanged:: 2.4.2
+ The value was an alias for `!ISOLATION_LEVEL_SERIALIZABLE` before. The
+ two levels are distinct since PostgreSQL 9.1
+
+ .. seealso:: `Repeatable Read Isolation Level`__ in PostgreSQL
+ documentation.
+
+ .. __: https://www.postgresql.org/docs/current/static/transaction-iso.html#XACT-REPEATABLE-READ
+
+.. data:: ISOLATION_LEVEL_SERIALIZABLE
+
+ As in `!ISOLATION_LEVEL_READ_COMMITTED`, a new transaction is started at
+ the first `~cursor.execute()` command. Transactions run at a
+ :sql:`SERIALIZABLE` isolation level. This is the strictest transactions
+ isolation level, equivalent to having the transactions executed serially
+ rather than concurrently. However applications using this level must be
+ prepared to retry transactions due to serialization failures.
+
+ Starting from PostgreSQL 9.1, this mode monitors for conditions which
+ could make execution of a concurrent set of serializable transactions
+ behave in a manner inconsistent with all possible serial (one at a time)
+    executions of those transactions. In previous versions the behaviour was
+    the same as the :sql:`REPEATABLE READ` isolation level.
+
+ .. seealso:: `Serializable Isolation Level`__ in PostgreSQL documentation.
+
+ .. __: https://www.postgresql.org/docs/current/static/transaction-iso.html#XACT-SERIALIZABLE
+
+.. data:: ISOLATION_LEVEL_DEFAULT
+
+ A new transaction is started at the first `~cursor.execute()` command, but
+ the isolation level is not explicitly selected by Psycopg: the server will
+ use whatever level is defined in its configuration or by statements
+executed within the session outside Psycopg control. If you want to know
+ what the value is you can use a query such as :sql:`show
+ transaction_isolation`.
+
+ .. versionadded:: 2.7
+
+
+.. index::
+ pair: Transaction status; Constants
+
+.. _transaction-status-constants:
+
+Transaction status constants
+----------------------------
+
+These values represent the possible status of a transaction: the current value
+can be read using the `connection.info.transaction_status` property.
+
+.. data:: TRANSACTION_STATUS_IDLE
+
+ The session is idle and there is no current transaction.
+
+.. data:: TRANSACTION_STATUS_ACTIVE
+
+ A command is currently in progress.
+
+.. data:: TRANSACTION_STATUS_INTRANS
+
+ The session is idle in a valid transaction block.
+
+.. data:: TRANSACTION_STATUS_INERROR
+
+ The session is idle in a failed transaction block.
+
+.. data:: TRANSACTION_STATUS_UNKNOWN
+
+ Reported if the connection with the server is bad.
+
+
+
+.. index::
+ pair: Connection status; Constants
+
+.. _connection-status-constants:
+
+Connection status constants
+---------------------------
+
+These values represent the possible status of a connection: the current value
+can be read from the `~connection.status` attribute.
+
+It is possible to find the connection in a status other than the ones shown
+below. Those are the only states in which a working connection is expected to
+be found during the execution of regular Python client code: other states are
+for internal usage and Python code should not rely on them.
+
+.. data:: STATUS_READY
+
+ Connection established. No transaction in progress.
+
+.. data:: STATUS_BEGIN
+
+ Connection established. A transaction is currently in progress.
+
+.. data:: STATUS_IN_TRANSACTION
+
+    An alias for `STATUS_BEGIN`.
+
+.. data:: STATUS_PREPARED
+
+ The connection has been prepared for the second phase in a :ref:`two-phase
+ commit <tpc>` transaction. The connection can't be used to send commands
+ to the database until the transaction is finished with
+ `~connection.tpc_commit()` or `~connection.tpc_rollback()`.
+
+ .. versionadded:: 2.3
+
+
+
+.. index::
+ pair: Poll status; Constants
+
+.. _poll-constants:
+
+Poll constants
+--------------
+
+.. versionadded:: 2.2
+
+These values can be returned by `connection.poll()` during asynchronous
+connection and communication. They match the values in the libpq enum
+`!PostgresPollingStatusType`. See :ref:`async-support` and
+:ref:`green-support`.
+
+.. data:: POLL_OK
+
+ The data being read is available, or the file descriptor is ready for
+ writing: reading or writing will not block.
+
+.. data:: POLL_READ
+
+ Some data is being read from the backend, but it is not available yet on
+ the client and reading would block. Upon receiving this value, the client
+ should wait for the connection file descriptor to be ready *for reading*.
+ For example::
+
+ select.select([conn.fileno()], [], [])
+
+.. data:: POLL_WRITE
+
+ Some data is being sent to the backend but the connection file descriptor
+ can't currently accept new data. Upon receiving this value, the client
+ should wait for the connection file descriptor to be ready *for writing*.
+ For example::
+
+ select.select([], [conn.fileno()], [])
+
+.. data:: POLL_ERROR
+
+ There was a problem during connection polling. This value should actually
+ never be returned: in case of poll error usually an exception containing
+ the relevant details is raised.
+
+
+
+Additional database types
+-------------------------
+
+The `!extensions` module includes typecasters for many standard
+PostgreSQL types. These objects allow the conversion of returned data into
+Python objects. All the typecasters are automatically registered, except
+`UNICODE` and `UNICODEARRAY`: you can register them using
+`register_type()` in order to receive Unicode objects instead of strings
+from the database. See :ref:`unicode-handling` for details.
+
+.. data:: BOOLEAN
+ BYTES
+ DATE
+ DECIMAL
+ FLOAT
+ INTEGER
+ INTERVAL
+ LONGINTEGER
+ TIME
+ UNICODE
+
+ Typecasters for basic types. Note that a few other ones (`~psycopg2.BINARY`,
+ `~psycopg2.DATETIME`, `~psycopg2.NUMBER`, `~psycopg2.ROWID`,
+ `~psycopg2.STRING`) are exposed by the `psycopg2` module for |DBAPI|_
+ compliance.
+
+.. data:: BINARYARRAY
+ BOOLEANARRAY
+ BYTESARRAY
+ DATEARRAY
+ DATETIMEARRAY
+ DECIMALARRAY
+ FLOATARRAY
+ INTEGERARRAY
+ INTERVALARRAY
+ LONGINTEGERARRAY
+ ROWIDARRAY
+ STRINGARRAY
+ TIMEARRAY
+ UNICODEARRAY
+
+ Typecasters to convert arrays of sql types into Python lists.
+
+.. data:: PYDATE
+ PYDATETIME
+ PYDATETIMETZ
+ PYINTERVAL
+ PYTIME
+ PYDATEARRAY
+ PYDATETIMEARRAY
+ PYDATETIMETZARRAY
+ PYINTERVALARRAY
+ PYTIMEARRAY
+
+ Typecasters to convert time-related data types to Python `!datetime`
+ objects.
+
+.. versionchanged:: 2.2
+ previously the `DECIMAL` typecaster and the specific time-related
+ typecasters (`!PY*` and `!MX*`) were not exposed by the `extensions`
+ module. In older versions they can be imported from the implementation
+ module `!psycopg2._psycopg`.
+
+.. versionadded:: 2.7.2
+ the `!*DATETIMETZ*` objects.
+
+.. versionadded:: 2.8
+ the `!BYTES` and `BYTESARRAY` objects.
diff --git a/doc/src/extras.rst b/doc/src/extras.rst
new file mode 100644
index 0000000..96f801b
--- /dev/null
+++ b/doc/src/extras.rst
@@ -0,0 +1,1085 @@
+`psycopg2.extras` -- Miscellaneous goodies for Psycopg 2
+=============================================================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+.. module:: psycopg2.extras
+
+.. testsetup::
+
+ import psycopg2.extras
+ from psycopg2.extras import Inet
+
+ create_test_table()
+
+This module is a generic place used to hold little helper functions and
+classes until a better place in the distribution is found.
+
+
+.. _cursor-subclasses:
+
+Connection and cursor subclasses
+--------------------------------
+
+A few objects that change the way the results are returned by the cursor or
+modify the object behavior in some other way. Typically `!cursor` subclasses
+are passed as *cursor_factory* argument to `~psycopg2.connect()` so that the
+connection's `~connection.cursor()` method will generate objects of this
+class. Alternatively a `!cursor` subclass can be used one-off by passing it
+as the *cursor_factory* argument to the `!cursor()` method.
+
+If you want to use a `!connection` subclass you can pass it as the
+*connection_factory* argument of the `!connect()` function.
+
+
+.. index::
+ pair: Cursor; Dictionary
+
+.. _dict-cursor:
+
+
+Dictionary-like cursor
+^^^^^^^^^^^^^^^^^^^^^^
+
+The dict cursors allow access to the attributes of retrieved records
+using an interface similar to the Python dictionaries instead of the tuples.
+
+ >>> dict_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
+ >>> dict_cur.execute("INSERT INTO test (num, data) VALUES(%s, %s)",
+ ... (100, "abc'def"))
+ >>> dict_cur.execute("SELECT * FROM test")
+ >>> rec = dict_cur.fetchone()
+ >>> rec['id']
+ 1
+ >>> rec['num']
+ 100
+ >>> rec['data']
+ "abc'def"
+
+The records still support indexing as the original tuple:
+
+ >>> rec[2]
+ "abc'def"
+
+
+.. autoclass:: DictCursor
+
+.. autoclass:: DictConnection
+
+ .. note::
+
+ Not very useful since Psycopg 2.5: you can use `psycopg2.connect`\
+ ``(dsn, cursor_factory=DictCursor)`` instead of `!DictConnection`.
+
+.. autoclass:: DictRow
+
+
+Real dictionary cursor
+^^^^^^^^^^^^^^^^^^^^^^
+
+.. autoclass:: RealDictCursor
+
+.. autoclass:: RealDictConnection
+
+ .. note::
+
+ Not very useful since Psycopg 2.5: you can use `psycopg2.connect`\
+ ``(dsn, cursor_factory=RealDictCursor)`` instead of
+ `!RealDictConnection`.
+
+.. autoclass:: RealDictRow
+
+
+
+.. index::
+ pair: Cursor; namedtuple
+
+`namedtuple` cursor
+^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 2.3
+
+.. autoclass:: NamedTupleCursor
+
+.. autoclass:: NamedTupleConnection
+
+ .. note::
+
+ Not very useful since Psycopg 2.5: you can use `psycopg2.connect`\
+ ``(dsn, cursor_factory=NamedTupleCursor)`` instead of
+ `!NamedTupleConnection`.
+
+
+.. index::
+ pair: Cursor; Logging
+
+Logging cursor
+^^^^^^^^^^^^^^
+
+.. autoclass:: LoggingConnection
+ :members: initialize,filter
+
+.. autoclass:: LoggingCursor
+
+
+.. note::
+
+ Queries that are executed with `cursor.executemany()` are not logged.
+
+
+.. autoclass:: MinTimeLoggingConnection
+ :members: initialize,filter
+
+.. autoclass:: MinTimeLoggingCursor
+
+
+
+.. _replication-objects:
+
+Replication support objects
+---------------------------
+
+See :ref:`replication-support` for an introduction to the topic.
+
+
+The following replication types are defined:
+
+.. data:: REPLICATION_LOGICAL
+.. data:: REPLICATION_PHYSICAL
+
+
+.. index::
+ pair: Connection; replication
+
+.. autoclass:: LogicalReplicationConnection
+
+ This connection factory class can be used to open a special type of
+ connection that is used for logical replication.
+
+ Example::
+
+ from psycopg2.extras import LogicalReplicationConnection
+ log_conn = psycopg2.connect(dsn, connection_factory=LogicalReplicationConnection)
+ log_cur = log_conn.cursor()
+
+
+.. autoclass:: PhysicalReplicationConnection
+
+ This connection factory class can be used to open a special type of
+ connection that is used for physical replication.
+
+ Example::
+
+ from psycopg2.extras import PhysicalReplicationConnection
+ phys_conn = psycopg2.connect(dsn, connection_factory=PhysicalReplicationConnection)
+ phys_cur = phys_conn.cursor()
+
+ Both `LogicalReplicationConnection` and `PhysicalReplicationConnection` use
+ `ReplicationCursor` for actual communication with the server.
+
+
+.. index::
+ pair: Message; replication
+
+The individual messages in the replication stream are represented by
+`ReplicationMessage` objects (both logical and physical type):
+
+.. autoclass:: ReplicationMessage
+
+ .. attribute:: payload
+
+ The actual data received from the server.
+
+ An instance of either `bytes()` or `unicode()`, depending on the value
+ of the `decode` option passed to `~ReplicationCursor.start_replication()`
+ on the connection. See `~ReplicationCursor.read_message()` for
+ details.
+
+ .. attribute:: data_size
+
+ The raw size of the message payload (before possible unicode
+ conversion).
+
+ .. attribute:: data_start
+
+ LSN position of the start of the message.
+
+ .. attribute:: wal_end
+
+ LSN position of the current end of WAL on the server.
+
+ .. attribute:: send_time
+
+ A `~datetime` object representing the server timestamp at the moment
+ when the message was sent.
+
+ .. attribute:: cursor
+
+ A reference to the corresponding `ReplicationCursor` object.
+
+
+.. index::
+ pair: Cursor; replication
+
+.. autoclass:: ReplicationCursor
+
+ .. method:: create_replication_slot(slot_name, slot_type=None, output_plugin=None)
+
+ Create streaming replication slot.
+
+ :param slot_name: name of the replication slot to be created
+ :param slot_type: type of replication: should be either
+ `REPLICATION_LOGICAL` or `REPLICATION_PHYSICAL`
+ :param output_plugin: name of the logical decoding output plugin to be
+ used by the slot; required for logical
+ replication connections, disallowed for physical
+
+ Example::
+
+ log_cur.create_replication_slot("logical1", "test_decoding")
+ phys_cur.create_replication_slot("physical1")
+
+ # either logical or physical replication connection
+ cur.create_replication_slot("slot1", slot_type=REPLICATION_LOGICAL)
+
+ When creating a slot on a logical replication connection, a logical
+ replication slot is created by default. Logical replication requires
+ the name of the logical decoding output plugin to be specified.
+
+ When creating a slot on a physical replication connection, a physical
+ replication slot is created by default. No output plugin parameter is
+ required or allowed when creating a physical replication slot.
+
+ In either case the type of slot being created can be specified
+ explicitly using *slot_type* parameter.
+
+ Replication slots are a feature of PostgreSQL server starting with
+ version 9.4.
+
+ .. method:: drop_replication_slot(slot_name)
+
+ Drop streaming replication slot.
+
+ :param slot_name: name of the replication slot to drop
+
+ Example::
+
+ # either logical or physical replication connection
+ cur.drop_replication_slot("slot1")
+
+ Replication slots are a feature of PostgreSQL server starting with
+ version 9.4.
+
+ .. method:: start_replication(slot_name=None, slot_type=None, start_lsn=0, timeline=0, options=None, decode=False, status_interval=10)
+
+ Start replication on the connection.
+
+ :param slot_name: name of the replication slot to use; required for
+ logical replication, physical replication can work
+ with or without a slot
+ :param slot_type: type of replication: should be either
+ `REPLICATION_LOGICAL` or `REPLICATION_PHYSICAL`
+ :param start_lsn: the optional LSN position to start replicating from,
+ can be an integer or a string of hexadecimal digits
+ in the form ``XXX/XXX``
+ :param timeline: WAL history timeline to start streaming from (optional,
+ can only be used with physical replication)
+ :param options: a dictionary of options to pass to logical replication
+ slot (not allowed with physical replication)
+ :param decode: a flag indicating that unicode conversion should be
+ performed on messages received from the server
+ :param status_interval: time between feedback packets sent to the server
+
+ If a *slot_name* is specified, the slot must exist on the server and
+ its type must match the replication type used.
+
+ If not specified using *slot_type* parameter, the type of replication
+ is defined by the type of replication connection. Logical replication
+ is only allowed on logical replication connection, but physical
+ replication can be used with both types of connection.
+
+ On the other hand, physical replication doesn't require a named
+ replication slot to be used, only logical replication does. In any
+ case logical replication and replication slots are a feature of
+ PostgreSQL server starting with version 9.4. Physical replication can
+ be used starting with 9.0.
+
+ If *start_lsn* is specified, the requested stream will start from that
+ LSN. The default is `!None` which passes the LSN ``0/0`` causing
+ replay to begin at the last point for which the server got flush
+ confirmation from the client, or the oldest available point for a new
+ slot.
+
+ The server might produce an error if a WAL file for the given LSN has
+ already been recycled or it may silently start streaming from a later
+ position: the client can verify the actual position using information
+ provided by the `ReplicationMessage` attributes. The exact server
+ behavior depends on the type of replication and use of slots.
+
+ The *timeline* parameter can only be specified with physical
+ replication and only starting with server version 9.3.
+
+ A dictionary of *options* may be passed to the logical decoding plugin
+ on a logical replication slot. The set of supported options depends
+ on the output plugin that was used to create the slot. Must be
+ `!None` for physical replication.
+
+ If *decode* is set to `!True` the messages received from the server
+ would be converted according to the connection `~connection.encoding`.
+ *This parameter should not be set with physical replication or with
+ logical replication plugins that produce binary output.*
+
+ Replication stream should periodically send feedback to the database
+ to prevent disconnect via timeout. Feedback is automatically sent when
+ `read_message()` is called or during run of the `consume_stream()`.
+ To specify the feedback interval use *status_interval* parameter.
+ The value of this parameter must be set to at least 1 second, but
+ it can have a fractional part.
+
+
+ This function constructs a |START_REPLICATION|_ command and calls
+ `start_replication_expert()` internally.
+
+ After starting the replication, to actually consume the incoming
+ server messages use `consume_stream()` or implement a loop around
+ `read_message()` in case of :ref:`asynchronous connection
+ <async-support>`.
+
+ .. versionchanged:: 2.8.3
+ added the *status_interval* parameter.
+
+ .. |START_REPLICATION| replace:: :sql:`START_REPLICATION`
+ .. _START_REPLICATION: https://www.postgresql.org/docs/current/static/protocol-replication.html
+
+ .. method:: start_replication_expert(command, decode=False, status_interval=10)
+
+ Start replication on the connection using provided
+ |START_REPLICATION|_ command.
+
+ :param command: The full replication command. It can be a string or a
+ `~psycopg2.sql.Composable` instance for dynamic generation.
+ :param decode: a flag indicating that unicode conversion should be
+ performed on messages received from the server.
+ :param status_interval: time between feedback packets sent to the server
+
+ .. versionchanged:: 2.8.3
+ added the *status_interval* parameter.
+
+
+ .. method:: consume_stream(consume, keepalive_interval=None)
+
+ :param consume: a callable object with signature :samp:`consume({msg})`
+ :param keepalive_interval: interval (in seconds) to send keepalive
+ messages to the server
+
+ This method can only be used with a synchronous connection. For
+ asynchronous connections see `read_message()`.
+
+ Before using this method to consume the stream call
+ `start_replication()` first.
+
+ This method enters an endless loop reading messages from the server
+ and passing them to ``consume()`` one at a time, then waiting for more
+ messages from the server. In order to make this method break out of
+ the loop and return, ``consume()`` can throw a `StopReplication`
+ exception. Any unhandled exception will make it break out of the loop
+ as well.
+
+ The *msg* object passed to ``consume()`` is an instance of
+ `ReplicationMessage` class. See `read_message()` for details about
+ message decoding.
+
+ This method also sends feedback messages to the server every
+ *keepalive_interval* (in seconds). The value of this parameter must
+ be set to at least 1 second, but it can have a fractional part.
+ If the *keepalive_interval* is not specified, the value of
+ *status_interval* specified in the `start_replication()` or
+ `start_replication_expert()` will be used.
+
+ The client must confirm every processed message by calling
+ `send_feedback()` method on the corresponding replication cursor. A
+ reference to the cursor is provided in the `ReplicationMessage` as an
+ attribute.
+
+ The following example is a sketch implementation of ``consume()``
+ callable for logical replication::
+
+ class LogicalStreamConsumer(object):
+
+ # ...
+
+ def __call__(self, msg):
+ self.process_message(msg.payload)
+ msg.cursor.send_feedback(flush_lsn=msg.data_start)
+
+ consumer = LogicalStreamConsumer()
+ cur.consume_stream(consumer)
+
+ .. warning::
+
+ When using replication with slots, failure to constantly consume
+ *and* report success to the server appropriately can eventually
+ lead to "disk full" condition on the server, because the server
+ retains all the WAL segments that might be needed to stream the
+ changes via all of the currently open replication slots.
+
+ .. versionchanged:: 2.8.3
+ changed the default value of the *keepalive_interval* parameter to `!None`.
+
+ .. method:: send_feedback(write_lsn=0, flush_lsn=0, apply_lsn=0, reply=False, force=False)
+
+ :param write_lsn: a LSN position up to which the client has written the data locally
+ :param flush_lsn: a LSN position up to which the client has processed the
+ data reliably (the server is allowed to discard all
+ and every data that predates this LSN)
+ :param apply_lsn: a LSN position up to which the warm standby server
+ has applied the changes (physical replication
+ master-slave protocol only)
+ :param reply: request the server to send back a keepalive message immediately
+ :param force: force sending a feedback message regardless of status_interval timeout
+
+ Use this method to report to the server that all messages up to a
+ certain LSN position have been processed on the client and may be
+ discarded on the server.
+
+ If the *reply* or *force* parameters are not set, this method will
+ just update internal structures without sending the feedback message
+ to the server. The library sends feedback message automatically
+ when *status_interval* timeout is reached. For this to work, you must
+ call `send_feedback()` on the same Cursor that you called `start_replication()`
+ on (the one in `message.cursor`) or your feedback will be lost.
+
+ .. versionchanged:: 2.8.3
+ added the *force* parameter.
+
+ Low-level replication cursor methods for :ref:`asynchronous connection
+ <async-support>` operation.
+
+ With the synchronous connection a call to `consume_stream()` handles all
+ the complexity of handling the incoming messages and sending keepalive
+ replies, but at times it might be beneficial to use low-level interface
+ for better control, in particular to `~select` on multiple sockets. The
+ following methods are provided for asynchronous operation:
+
+ .. method:: read_message()
+
+ Try to read the next message from the server without blocking and
+ return an instance of `ReplicationMessage` or `!None`, in case there
+ are no more data messages from the server at the moment.
+
+ This method should be used in a loop with asynchronous connections
+ (after calling `start_replication()` once). For synchronous
+ connections see `consume_stream()`.
+
+ The returned message's `~ReplicationMessage.payload` is an instance of
+ `!unicode` decoded according to connection `~connection.encoding`
+ *iff* *decode* was set to `!True` in the initial call to
+ `start_replication()` on this connection, otherwise it is an instance
+ of `!bytes` with no decoding.
+
+ It is expected that the calling code will call this method repeatedly
+ in order to consume all of the messages that might have been buffered
+ until `!None` is returned. After receiving `!None` from this method
+ the caller should use `~select.select()` or `~select.poll()` on the
+ corresponding connection to block the process until there is more data
+ from the server.
+
+ Last, but not least, this method sends feedback messages when
+ *status_interval* timeout is reached or when keepalive message with
+ reply request arrived from the server.
+
+ .. method:: fileno()
+
+ Call the corresponding connection's `~connection.fileno()` method and
+ return the result.
+
+ This is a convenience method which allows replication cursor to be
+ used directly in `~select.select()` or `~select.poll()` calls.
+
+ .. attribute:: io_timestamp
+
+ A `~datetime` object representing the timestamp at the moment of last
+ communication with the server (a data or keepalive message in either
+ direction).
+
+ .. attribute:: feedback_timestamp
+
+ A `~datetime` object representing the timestamp at the moment when
+ the last feedback message was sent to the server.
+
+ .. versionadded:: 2.8.3
+
+ .. attribute:: wal_end
+
+ LSN position of the current end of WAL on the server at the
+ moment of last data or keepalive message received from the
+ server.
+
+ .. versionadded:: 2.8
+
+ An actual example of asynchronous operation might look like this::
+
+ from select import select
+ from datetime import datetime
+
+ def consume(msg):
+ # ...
+ msg.cursor.send_feedback(flush_lsn=msg.data_start)
+
+ status_interval = 10.0
+ while True:
+ msg = cur.read_message()
+ if msg:
+ consume(msg)
+ else:
+ now = datetime.now()
+ timeout = status_interval - (now - cur.feedback_timestamp).total_seconds()
+ try:
+ sel = select([cur], [], [], max(0, timeout))
+ except InterruptedError:
+ pass # recalculate timeout and continue
+
+.. index::
+ pair: Cursor; Replication
+
+.. autoclass:: StopReplication
+
+
+.. index::
+ single: Data types; Additional
+
+Additional data types
+---------------------
+
+
+.. index::
+ pair: JSON; Data types
+ pair: JSON; Adaptation
+
+.. _adapt-json:
+
+JSON_ adaptation
+^^^^^^^^^^^^^^^^
+
+.. versionadded:: 2.5
+.. versionchanged:: 2.5.4
+ added |jsonb| support. In previous versions |jsonb| values are returned
+ as strings. See :ref:`the FAQ <faq-jsonb-adapt>` for a workaround.
+
+Psycopg can adapt Python objects to and from the PostgreSQL |jsons|_
+types. With PostgreSQL 9.2 and following versions adaptation is
+available out-of-the-box. To use JSON data with previous database versions
+(either with the `9.1 json extension`__, but even if you want to convert text
+fields to JSON) you can use the `register_json()` function.
+
+.. __: http://people.planetpostgresql.org/andrew/index.php?/archives/255-JSON-for-PG-9.2-...-and-now-for-9.1!.html
+
+The Python :py:mod:`json` module is used by default to convert Python objects
+to JSON and to parse data from the database.
+
+.. _JSON: https://www.json.org/
+.. |json| replace:: :sql:`json`
+.. |jsonb| replace:: :sql:`jsonb`
+.. |jsons| replace:: |json| and |jsonb|
+.. _jsons: https://www.postgresql.org/docs/current/static/datatype-json.html
+
+In order to pass a Python object to the database as query argument you can use
+the `Json` adapter::
+
+ curs.execute("insert into mytable (jsondata) values (%s)",
+ [Json({'a': 100})])
+
+Reading from the database, |json| and |jsonb| values will be automatically
+converted to Python objects.
+
+.. note::
+
+ If you are using the PostgreSQL :sql:`json` data type but you want to read
+ it as a string in Python instead of having it parsed, you can either cast
+ the column to :sql:`text` in the query (it is an efficient operation, that
+ doesn't involve a copy)::
+
+ cur.execute("select jsondata::text from mytable")
+
+ or you can register a no-op `!loads()` function with
+ `register_default_json()`::
+
+ psycopg2.extras.register_default_json(loads=lambda x: x)
+
+.. note::
+
+ You can use `~psycopg2.extensions.register_adapter()` to adapt any Python
+ dictionary to JSON, either registering `Json` or any subclass or factory
+ creating a compatible adapter::
+
+ psycopg2.extensions.register_adapter(dict, psycopg2.extras.Json)
+
+ This setting is global though, so it is not compatible with similar
+ adapters such as the one registered by `register_hstore()`. Any other
+ object supported by JSON can be registered the same way, but this will
+ clobber the default adaptation rule, so be careful about unwanted side
+ effects.
+
+If you want to customize the adaptation from Python to PostgreSQL you can
+either provide a custom `!dumps()` function to `Json`::
+
+ curs.execute("insert into mytable (jsondata) values (%s)",
+ [Json({'a': 100}, dumps=simplejson.dumps)])
+
+or you can subclass it overriding the `~Json.dumps()` method::
+
+ class MyJson(Json):
+ def dumps(self, obj):
+ return simplejson.dumps(obj)
+
+ curs.execute("insert into mytable (jsondata) values (%s)",
+ [MyJson({'a': 100})])
+
+Customizing the conversion from PostgreSQL to Python can be done passing a
+custom `!loads()` function to `register_json()`. For the builtin data types
+(|json| from PostgreSQL 9.2, |jsonb| from PostgreSQL 9.4) use
+`register_default_json()` and `register_default_jsonb()`. For example, if you
+want to convert the float values from :sql:`json` into
+:py:class:`~decimal.Decimal` you can use::
+
+ loads = lambda x: json.loads(x, parse_float=Decimal)
+ psycopg2.extras.register_json(conn, loads=loads)
+
+Or, if you want to use an alternative JSON module implementation, such as the
+faster UltraJSON_, you can use::
+
+ psycopg2.extras.register_default_json(loads=ujson.loads, globally=True)
+ psycopg2.extras.register_default_jsonb(loads=ujson.loads, globally=True)
+
+.. _UltraJSON: https://pypi.org/project/ujson/
+
+
+.. autoclass:: Json
+
+ .. automethod:: dumps
+
+.. autofunction:: register_json
+
+ .. versionchanged:: 2.5.4
+ added the *name* parameter to enable :sql:`jsonb` support.
+
+.. autofunction:: register_default_json
+
+.. autofunction:: register_default_jsonb
+
+ .. versionadded:: 2.5.4
+
+
+
+.. index::
+ pair: hstore; Data types
+ pair: dict; Adaptation
+
+.. _adapt-hstore:
+
+Hstore data type
+^^^^^^^^^^^^^^^^
+
+.. versionadded:: 2.3
+
+The |hstore|_ data type is a key-value store embedded in PostgreSQL. It has
+been available for several server versions but with the release 9.0 it has
+been greatly improved in capacity and usefulness with the addition of many
+functions. It supports GiST or GIN indexes allowing search by keys or
+key/value pairs as well as regular BTree indexes for equality, uniqueness etc.
+
+Psycopg can convert Python `!dict` objects to and from |hstore| structures.
+Only dictionaries with string/unicode keys and values are supported. `!None`
+is also allowed as a value but not as a key. Psycopg uses a more efficient |hstore|
+representation when dealing with PostgreSQL 9.0 but previous server versions
+are supported as well. By default the adapter/typecaster are disabled: they
+can be enabled using the `register_hstore()` function.
+
+.. autofunction:: register_hstore
+
+ .. versionchanged:: 2.4
+ added the *oid* parameter. If not specified, the typecaster is
+ installed also if |hstore| is not installed in the :sql:`public`
+ schema.
+
+ .. versionchanged:: 2.4.3
+ added support for |hstore| array.
+
+
+.. |hstore| replace:: :sql:`hstore`
+.. _hstore: https://www.postgresql.org/docs/current/static/hstore.html
+
+
+
+.. index::
+ pair: Composite types; Data types
+ pair: tuple; Adaptation
+ pair: namedtuple; Adaptation
+
+.. _adapt-composite:
+
+Composite types casting
+^^^^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 2.4
+
+Using `register_composite()` it is possible to cast a PostgreSQL composite
+type (either created with the |CREATE TYPE|_ command or implicitly defined
+after a table row type) into a Python named tuple, or into a regular tuple if
+:py:func:`collections.namedtuple` is not found.
+
+.. |CREATE TYPE| replace:: :sql:`CREATE TYPE`
+.. _CREATE TYPE: https://www.postgresql.org/docs/current/static/sql-createtype.html
+
+.. doctest::
+
+ >>> cur.execute("CREATE TYPE card AS (value int, suit text);")
+ >>> psycopg2.extras.register_composite('card', cur)
+ <psycopg2.extras.CompositeCaster object at 0x...>
+
+ >>> cur.execute("select (8, 'hearts')::card")
+ >>> cur.fetchone()[0]
+ card(value=8, suit='hearts')
+
+Nested composite types are handled as expected, provided that the type of the
+composite components are registered as well.
+
+.. doctest::
+
+ >>> cur.execute("CREATE TYPE card_back AS (face card, back text);")
+ >>> psycopg2.extras.register_composite('card_back', cur)
+ <psycopg2.extras.CompositeCaster object at 0x...>
+
+ >>> cur.execute("select ((8, 'hearts'), 'blue')::card_back")
+ >>> cur.fetchone()[0]
+ card_back(face=card(value=8, suit='hearts'), back='blue')
+
+Adaptation from Python tuples to composite types is automatic instead and
+requires no adapter registration.
+
+
+.. _custom-composite:
+
+.. Note::
+
+ If you want to convert PostgreSQL composite types into something different
+ than a `!namedtuple` you can subclass the `CompositeCaster` overriding
+ `~CompositeCaster.make()`. For example, if you want to convert your type
+ into a Python dictionary you can use::
+
+ >>> class DictComposite(psycopg2.extras.CompositeCaster):
+ ... def make(self, values):
+ ... return dict(zip(self.attnames, values))
+
+ >>> psycopg2.extras.register_composite('card', cur,
+ ... factory=DictComposite)
+
+ >>> cur.execute("select (8, 'hearts')::card")
+ >>> cur.fetchone()[0]
+ {'suit': 'hearts', 'value': 8}
+
+
+.. autofunction:: register_composite
+
+ .. versionchanged:: 2.4.3
+ added support for array of composite types
+ .. versionchanged:: 2.5
+ added the *factory* parameter
+
+
+.. autoclass:: CompositeCaster
+
+ .. automethod:: make
+
+ .. versionadded:: 2.5
+
+ Object attributes:
+
+ .. attribute:: name
+
+ The name of the PostgreSQL type.
+
+ .. attribute:: schema
+
+ The schema where the type is defined.
+
+ .. versionadded:: 2.5
+
+ .. attribute:: oid
+
+ The oid of the PostgreSQL type.
+
+ .. attribute:: array_oid
+
+ The oid of the PostgreSQL array type, if available.
+
+ .. attribute:: type
+
+ The type of the Python objects returned. If :py:func:`collections.namedtuple()`
+ is available, it is a named tuple with attributes equal to the type
+ components. Otherwise it is just the `!tuple` object.
+
+ .. attribute:: attnames
+
+ List of component names of the type to be cast.
+
+ .. attribute:: atttypes
+
+ List of component type oids of the type to be cast.
+
+
+.. index::
+ pair: range; Data types
+
+.. _adapt-range:
+
+Range data types
+^^^^^^^^^^^^^^^^
+
+.. versionadded:: 2.5
+
+Psycopg offers a `Range` Python type and supports adaptation between them and
+PostgreSQL |range|_ types. Builtin |range| types are supported out-of-the-box;
+user-defined |range| types can be adapted using `register_range()`.
+
+.. |range| replace:: :sql:`range`
+.. _range: https://www.postgresql.org/docs/current/static/rangetypes.html
+
+.. autoclass:: Range
+
+ This Python type is only used to pass and retrieve range values to and
+ from PostgreSQL and doesn't attempt to replicate the PostgreSQL range
+ features: it doesn't perform normalization and doesn't implement all the
+ operators__ supported by the database.
+
+ .. __: https://www.postgresql.org/docs/current/static/functions-range.html#RANGE-OPERATORS-TABLE
+
+ `!Range` objects are immutable, hashable, and support the ``in`` operator
+ (checking if an element is within the range). They can be tested for
+ equivalence. Empty ranges evaluate to `!False` in boolean context,
+ nonempty evaluate to `!True`.
+
+ .. versionchanged:: 2.5.3
+
+ `!Range` objects can be sorted although, as on the server-side, this
+ ordering is not particularly meaningful. It is only meant to be used
+ by programs assuming objects using `!Range` as primary key can be
+ sorted on them. In previous versions comparing `!Range`\s raises
+ `!TypeError`.
+
+ Although it is possible to instantiate `!Range` objects, the class doesn't
+ have an adapter registered, so you cannot normally pass these instances as
+ query arguments. To use range objects as query arguments you can either
+ use one of the provided subclasses, such as `NumericRange` or create a
+ custom subclass using `register_range()`.
+
+ Object attributes:
+
+ .. autoattribute:: isempty
+ .. autoattribute:: lower
+ .. autoattribute:: upper
+ .. autoattribute:: lower_inc
+ .. autoattribute:: upper_inc
+ .. autoattribute:: lower_inf
+ .. autoattribute:: upper_inf
+
+
+The following `Range` subclasses map builtin PostgreSQL |range| types to
+Python objects: they have an adapter registered so their instances can be
+passed as query arguments. |range| values read from database queries are
+automatically cast into instances of these classes.
+
+.. autoclass:: NumericRange
+.. autoclass:: DateRange
+.. autoclass:: DateTimeRange
+.. autoclass:: DateTimeTZRange
+
+.. note::
+
+ Python lacks a representation for :sql:`infinity` date so Psycopg converts
+ the value to `date.max` and such. When written into the database these
+ dates will assume their literal value (e.g. :sql:`9999-12-31` instead of
+ :sql:`infinity`). Check :ref:`infinite-dates-handling` for an example of
+ an alternative adapter to map `date.max` to :sql:`infinity`. An
+ alternative dates adapter will be used automatically by the `DateRange`
+ adapter and so on.
+
+
+Custom |range| types (created with |CREATE TYPE|_ :sql:`... AS RANGE`) can be
+adapted to a custom `Range` subclass:
+
+.. autofunction:: register_range
+
+.. autoclass:: RangeCaster
+
+ Object attributes:
+
+ .. attribute:: range
+
+ The `!Range` subclass adapted.
+
+ .. attribute:: adapter
+
+ The `~psycopg2.extensions.ISQLQuote` responsible to adapt `!range`.
+
+ .. attribute:: typecaster
+
+ The object responsible for casting.
+
+ .. attribute:: array_typecaster
+
+ The object responsible to cast arrays, if available, else `!None`.
+
+
+
+.. index::
+ pair: UUID; Data types
+
+.. _adapt-uuid:
+
+UUID data type
+^^^^^^^^^^^^^^
+
+.. versionadded:: 2.0.9
+.. versionchanged:: 2.0.13 added UUID array support.
+
+.. doctest::
+
+ >>> psycopg2.extras.register_uuid()
+ <psycopg2._psycopg.type object at 0x...>
+
+ >>> # Python UUID can be used in SQL queries
+ >>> import uuid
+ >>> my_uuid = uuid.UUID('{12345678-1234-5678-1234-567812345678}')
+ >>> psycopg2.extensions.adapt(my_uuid).getquoted()
+ "'12345678-1234-5678-1234-567812345678'::uuid"
+
+ >>> # PostgreSQL UUID are transformed into Python UUID objects.
+ >>> cur.execute("SELECT 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'::uuid")
+ >>> cur.fetchone()[0]
+ UUID('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11')
+
+
+.. autofunction:: register_uuid
+
+.. autoclass:: UUID_adapter
+
+
+
+.. index::
+ pair: INET; Data types
+ pair: CIDR; Data types
+ pair: MACADDR; Data types
+
+.. _adapt-network:
+
+Networking data types
+^^^^^^^^^^^^^^^^^^^^^
+
+By default Psycopg casts the PostgreSQL networking data types (:sql:`inet`,
+:sql:`cidr`, :sql:`macaddr`) into ordinary strings; array of such types are
+converted into lists of strings.
+
+.. versionchanged:: 2.7
+ in previous version array of networking types were not treated as arrays.
+
+.. autofunction:: register_ipaddress
+
+
+.. autofunction:: register_inet
+
+ .. deprecated:: 2.7
+ this function will not receive further development and may disappear in
+ future versions.
+
+.. doctest::
+
+ >>> psycopg2.extras.register_inet()
+ <psycopg2._psycopg.type object at 0x...>
+
+ >>> cur.mogrify("SELECT %s", (Inet('127.0.0.1/32'),))
+ "SELECT E'127.0.0.1/32'::inet"
+
+ >>> cur.execute("SELECT '192.168.0.1/24'::inet")
+ >>> cur.fetchone()[0].addr
+ '192.168.0.1/24'
+
+
+.. autoclass:: Inet
+
+ .. deprecated:: 2.7
+ this object will not receive further development and may disappear in
+ future versions.
+
+
+
+.. _fast-exec:
+
+Fast execution helpers
+----------------------
+
+The current implementation of `~cursor.executemany()` is (using an extremely
+charitable understatement) not particularly performing. These functions can
+be used to speed up the repeated execution of a statement against a set of
+parameters. By reducing the number of server roundtrips the performance can be
+`orders of magnitude better`__ than using `!executemany()`.
+
+.. __: https://github.com/psycopg/psycopg2/issues/491#issuecomment-276551038
+
+
+.. autofunction:: execute_batch
+
+ .. versionadded:: 2.7
+
+.. note::
+
+ `!execute_batch()` can be also used in conjunction with PostgreSQL
+ prepared statements using |PREPARE|_, |EXECUTE|_, |DEALLOCATE|_.
+ Instead of executing::
+
+ execute_batch(cur,
+ "big and complex SQL with %s %s params",
+ params_list)
+
+ it is possible to execute something like::
+
+ cur.execute("PREPARE stmt AS big and complex SQL with $1 $2 params")
+ execute_batch(cur, "EXECUTE stmt (%s, %s)", params_list)
+ cur.execute("DEALLOCATE stmt")
+
+ which may bring further performance benefits: if the operation to perform
+ is complex, every single execution will be faster as the query plan is
+ already cached; furthermore the amount of data to send to the server will
+ be smaller (one |EXECUTE| per param set instead of the whole, likely
+ longer, statement).
+
+ .. |PREPARE| replace:: :sql:`PREPARE`
+ .. _PREPARE: https://www.postgresql.org/docs/current/static/sql-prepare.html
+
+ .. |EXECUTE| replace:: :sql:`EXECUTE`
+ .. _EXECUTE: https://www.postgresql.org/docs/current/static/sql-execute.html
+
+ .. |DEALLOCATE| replace:: :sql:`DEALLOCATE`
+ .. _DEALLOCATE: https://www.postgresql.org/docs/current/static/sql-deallocate.html
+
+
+.. autofunction:: execute_values
+
+ .. versionadded:: 2.7
+ .. versionchanged:: 2.8
+ added the *fetch* parameter.
+
+
+.. index::
+ pair: Example; Coroutine;
+
+
+
+Coroutine support
+-----------------
+
+.. autofunction:: wait_select(conn)
+
+ .. versionchanged:: 2.6.2
+ allow canceling a query using :kbd:`Ctrl-C`, see
+ :ref:`the FAQ <faq-interrupt-query>` for an example.
diff --git a/doc/src/faq.rst b/doc/src/faq.rst
new file mode 100644
index 0000000..9d1dbeb
--- /dev/null
+++ b/doc/src/faq.rst
@@ -0,0 +1,382 @@
+Frequently Asked Questions
+==========================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+Here are a few gotchas you may encounter using `psycopg2`. Feel free to
+suggest new entries!
+
+
+Meta
+----
+
+.. _faq-question:
+.. cssclass:: faq
+
+How do I ask a question?
+ - Have you first checked if your question is answered already in the
+ documentation?
+
+ - If your question is about installing psycopg, have you checked the
+ :ref:`install FAQ <faq-compile>` and the :ref:`install docs
+ <installation>`?
+
+ - Have you googled for your error message?
+
+ - If you haven't found an answer yet, please write to the `Mailing List`_.
+
+ - If you haven't found a bug, DO NOT write to the bug tracker to ask
+ questions. You will only get piro grumpy.
+
+ .. _mailing list: https://www.postgresql.org/list/psycopg/
+
+
+.. _faq-transactions:
+
+Problems with transactions handling
+-----------------------------------
+
+.. _faq-idle-in-transaction:
+.. cssclass:: faq
+
+Why does `!psycopg2` leave database sessions "idle in transaction"?
+ Psycopg normally starts a new transaction the first time a query is
+ executed, e.g. calling `cursor.execute()`, even if the command is a
+ :sql:`SELECT`. The transaction is not closed until an explicit
+ `~connection.commit()` or `~connection.rollback()`.
+
+ If you are writing a long-living program, you should probably make sure to
+ call one of the transaction closing methods before leaving the connection
+ unused for a long time (which may also be a few seconds, depending on the
+ concurrency level in your database). Alternatively you can use a
+ connection in `~connection.autocommit` mode to avoid a new transaction to
+ be started at the first command.
+
+
+.. _faq-transaction-aborted:
+.. cssclass:: faq
+
+I receive the error *current transaction is aborted, commands ignored until end of transaction block* and can't do anything else!
+ There was a problem *in the previous* command to the database, which
+ resulted in an error. The database will not recover automatically from
+ this condition: you must run a `~connection.rollback()` before sending
+ new commands to the session (if this seems too harsh, remember that
+ PostgreSQL supports nested transactions using the |SAVEPOINT|_ command).
+
+ .. |SAVEPOINT| replace:: :sql:`SAVEPOINT`
+ .. _SAVEPOINT: https://www.postgresql.org/docs/current/static/sql-savepoint.html
+
+
+.. _faq-transaction-aborted-multiprocess:
+.. cssclass:: faq
+
+Why do I get the error *current transaction is aborted, commands ignored until end of transaction block* when I use `!multiprocessing` (or any other forking system) and not when I use `!threading`?
+ Psycopg's connections can't be shared across processes (but are thread
+ safe). If you are forking the Python process make sure to create a new
+ connection in each forked child. See :ref:`thread-safety` for further
+ information.
+
+
+.. _faq-types:
+
+Problems with type conversions
+------------------------------
+
+.. _faq-cant-adapt:
+.. cssclass:: faq
+
+Why does `!cursor.execute()` raise the exception *can't adapt*?
+ Psycopg converts Python objects in a SQL string representation by looking
+ at the object class. The exception is raised when you are trying to pass
+ as query parameter an object for which there is no adapter registered for
+ its class. See :ref:`adapting-new-types` for more information.
+
+
+.. _faq-number-required:
+.. cssclass:: faq
+
+I can't pass an integer or a float parameter to my query: it says *a number is required*, but *it is* a number!
+ In your query string, you always have to use ``%s`` placeholders,
+ even when passing a number. All Python objects are converted by Psycopg
+ in their SQL representation, so they get passed to the query as strings.
+ See :ref:`query-parameters`. ::
+
+ >>> cur.execute("INSERT INTO numbers VALUES (%d)", (42,)) # WRONG
+ >>> cur.execute("INSERT INTO numbers VALUES (%s)", (42,)) # correct
+
+
+.. _faq-not-all-arguments-converted:
+.. cssclass:: faq
+
+I try to execute a query but it fails with the error *not all arguments converted during string formatting* (or *object does not support indexing*). Why?
+ Psycopg always requires positional arguments to be passed as a sequence, even
+ when the query takes a single parameter. And remember that to make a
+ single item tuple in Python you need a comma! See :ref:`query-parameters`.
+ ::
+
+ >>> cur.execute("INSERT INTO foo VALUES (%s)", "bar") # WRONG
+ >>> cur.execute("INSERT INTO foo VALUES (%s)", ("bar")) # WRONG
+ >>> cur.execute("INSERT INTO foo VALUES (%s)", ("bar",)) # correct
+ >>> cur.execute("INSERT INTO foo VALUES (%s)", ["bar"]) # correct
+
+
+.. _faq-unicode:
+.. cssclass:: faq
+
+My database is Unicode, but I receive all the strings as UTF-8 `!str`. Can I receive `!unicode` objects instead?
+ The following magic formula will do the trick::
+
+ psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
+ psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
+
+ See :ref:`unicode-handling` for the gory details.
+
+
+.. _faq-bytes:
+.. cssclass:: faq
+
+My database is in mixed encoding. My program was working on Python 2 but Python 3 fails decoding the strings. How do I avoid decoding?
+ From psycopg 2.8 you can use the following adapters to always return bytes
+ from strings::
+
+ psycopg2.extensions.register_type(psycopg2.extensions.BYTES)
+ psycopg2.extensions.register_type(psycopg2.extensions.BYTESARRAY)
+
+ See :ref:`unicode-handling` for an example.
+
+
+.. _faq-float:
+.. cssclass:: faq
+
+Psycopg converts :sql:`decimal`\/\ :sql:`numeric` database types into Python `!Decimal` objects. Can I have `!float` instead?
+ You can register a customized adapter for PostgreSQL decimal type::
+
+ DEC2FLOAT = psycopg2.extensions.new_type(
+ psycopg2.extensions.DECIMAL.values,
+ 'DEC2FLOAT',
+ lambda value, curs: float(value) if value is not None else None)
+ psycopg2.extensions.register_type(DEC2FLOAT)
+
+ See :ref:`type-casting-from-sql-to-python` to read the relevant
+ documentation. If you find `!psycopg2.extensions.DECIMAL` not available, use
+ `!psycopg2._psycopg.DECIMAL` instead.
+
+
+.. _faq-json-adapt:
+.. cssclass:: faq
+
+Psycopg automatically converts PostgreSQL :sql:`json` data into Python objects. How can I receive strings instead?
+ The easiest way to avoid JSON parsing is to register a no-op function with
+ `~psycopg2.extras.register_default_json()`::
+
+ psycopg2.extras.register_default_json(loads=lambda x: x)
+
+ See :ref:`adapt-json` for further details.
+
+
+.. _faq-jsonb-adapt:
+.. cssclass:: faq
+
+Psycopg converts :sql:`json` values into Python objects but :sql:`jsonb` values are returned as strings. Can :sql:`jsonb` be converted automatically?
+ Automatic conversion of :sql:`jsonb` values is supported from Psycopg
+ release 2.5.4. For previous versions you can register the :sql:`json`
+ typecaster on the :sql:`jsonb` oids (which are known and not supposed to
+ change in future PostgreSQL versions)::
+
+ psycopg2.extras.register_json(oid=3802, array_oid=3807, globally=True)
+
+ See :ref:`adapt-json` for further details.
+
+
+.. _faq-identifier:
+.. cssclass:: faq
+
+How can I pass field/table names to a query?
+ The arguments in the `~cursor.execute()` methods can only represent data
+ to pass to the query: they cannot represent a table or field name::
+
+ # This doesn't work
+ cur.execute("insert into %s values (%s)", ["my_table", 42])
+
+ If you want to build a query dynamically you can use the objects exposed
+ by the `psycopg2.sql` module::
+
+ cur.execute(
+ sql.SQL("insert into {} values (%s)").format(sql.Identifier("my_table")),
+ [42])
+
+
+.. _faq-bytea-9.0:
+.. cssclass:: faq
+
+Transferring binary data from PostgreSQL 9.0 doesn't work.
+ PostgreSQL 9.0 uses by default `the "hex" format`__ to transfer
+ :sql:`bytea` data: the format can't be parsed by the libpq 8.4 and
+ earlier. The problem is solved in Psycopg 2.4.1, that uses its own parser
+ for the :sql:`bytea` format. For previous Psycopg releases, three options
+ to solve the problem are:
+
+ - set the bytea_output__ parameter to ``escape`` in the server;
+ - execute the database command ``SET bytea_output TO escape;`` in the
+ session before reading binary data;
+ - upgrade the libpq library on the client to at least 9.0.
+
+ .. __: https://www.postgresql.org/docs/current/static/datatype-binary.html
+ .. __: https://www.postgresql.org/docs/current/static/runtime-config-client.html#GUC-BYTEA-OUTPUT
+
+
+.. _faq-array:
+.. cssclass:: faq
+
+Arrays of *TYPE* are not cast to list.
+ Arrays are only casted to list when their oid is known, and an array
+ typecaster is registered for them. If there is no typecaster, the array is
+ returned unparsed from PostgreSQL (e.g. ``{a,b,c}``). It is easy to create
+ a generic arrays typecaster, returning a list of array: an example is
+ provided in the `~psycopg2.extensions.new_array_type()` documentation.
+
+
+.. _faq-best-practices:
+
+Best practices
+--------------
+
+.. _faq-reuse-cursors:
+.. cssclass:: faq
+
+When should I save and re-use a cursor as opposed to creating a new one as needed?
+ Cursors are lightweight objects and creating lots of them should not pose
+ any kind of problem. But note that cursors used to fetch result sets will
+ cache the data and use memory in proportion to the result set size. Our
+ suggestion is to almost always create a new cursor and dispose old ones as
+ soon as the data is not required anymore (call `~cursor.close()` on
+ them). The only exception is tight loops where one usually uses the same
+ cursor for a whole bunch of :sql:`INSERT`\s or :sql:`UPDATE`\s.
+
+
+.. _faq-reuse-connections:
+.. cssclass:: faq
+
+When should I save and re-use a connection as opposed to creating a new one as needed?
+ Creating a connection can be slow (think of SSL over TCP) so the best
+ practice is to create a single connection and keep it open as long as
+ required. It is also good practice to rollback or commit frequently (even
+ after a single :sql:`SELECT` statement) to make sure the backend is never
+ left "idle in transaction". See also `psycopg2.pool` for lightweight
+ connection pooling.
+
+
+.. _faq-named-cursors:
+.. cssclass:: faq
+
+What are the advantages or disadvantages of using named cursors?
+ The only disadvantage is that they use up resources on the server and
+ that there is a little overhead because at least two queries (one to
+ create the cursor and one to fetch the initial result set) are issued to
+ the backend. The advantage is that data is fetched one chunk at a time:
+ using small `~cursor.fetchmany()` values it is possible to use very
+ little memory on the client and to skip or discard parts of the result set.
+
+
+.. _faq-interrupt-query:
+.. cssclass:: faq
+
+How do I interrupt a long-running query in an interactive shell?
+ Normally the interactive shell becomes unresponsive to :kbd:`Ctrl-C` when
+ running a query. Using a connection in green mode allows Python to
+ receive and handle the interrupt, although it may leave the connection
+ broken, if the async callback doesn't handle the `!KeyboardInterrupt`
+ correctly.
+
+ Starting from psycopg 2.6.2, the `~psycopg2.extras.wait_select` callback
+ can handle a :kbd:`Ctrl-C` correctly. For previous versions, you can use
+ `this implementation`__.
+
+ .. __: https://www.psycopg.org/articles/2014/07/20/cancelling-postgresql-statements-python/
+
+ .. code-block:: pycon
+
+ >>> psycopg2.extensions.set_wait_callback(psycopg2.extras.wait_select)
+ >>> cnn = psycopg2.connect('')
+ >>> cur = cnn.cursor()
+ >>> cur.execute("select pg_sleep(10)")
+ ^C
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ QueryCanceledError: canceling statement due to user request
+
+ >>> cnn.rollback()
+ >>> # You can use the connection and cursor again from here
+
+
+.. _faq-compile:
+
+Problems compiling and installing psycopg2
+------------------------------------------
+
+.. _faq-wheels:
+.. cssclass:: faq
+
+Psycopg 2.8 fails to install, Psycopg 2.7 was working fine.
+ With Psycopg 2.7 you were installing binary packages, but they have proven
+ unreliable so now you have to install them explicitly using the
+ ``psycopg2-binary`` package. See :ref:`binary-packages` for all the
+ details.
+
+.. _faq-python-h:
+.. cssclass:: faq
+
+I can't compile `!psycopg2`: the compiler says *error: Python.h: No such file or directory*. What am I missing?
+ You need to install a Python development package: it is usually called
+ ``python-dev`` or ``python3-dev`` according to your Python version.
+
+
+.. _faq-libpq-fe-h:
+.. cssclass:: faq
+
+I can't compile `!psycopg2`: the compiler says *error: libpq-fe.h: No such file or directory*. What am I missing?
+ You need to install the development version of the libpq: the package is
+ usually called ``libpq-dev``.
+
+
+.. _faq-lo_truncate:
+.. cssclass:: faq
+
+`!psycopg2` raises `!ImportError` with message *_psycopg.so: undefined symbol: lo_truncate* when imported.
+ This means that Psycopg was compiled with |lo_truncate|_ support (*i.e.*
+ the libpq used at compile time was version >= 8.3) but at runtime an older
+ libpq dynamic library is found.
+
+ Fast-forward several years, if the message reports *undefined symbol:
+ lo_truncate64* it means that Psycopg was built with large objects 64 bits
+ API support (*i.e.* the libpq used at compile time was at least 9.3) but
+ at runtime an older libpq dynamic library is found.
+
+ You can use:
+
+ .. code-block:: shell
+
+ $ ldd /path/to/packages/psycopg2/_psycopg.so | grep libpq
+
+ to find what is the libpq dynamic library used at runtime.
+
+ You can avoid the problem by using the same version of the
+ :program:`pg_config` at install time and the libpq at runtime.
+
+ .. |lo_truncate| replace:: `!lo_truncate()`
+ .. _lo_truncate: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-TRUNCATE
+
+
+.. _faq-import-mod_wsgi:
+.. cssclass:: faq
+
+Psycopg raises *ImportError: cannot import name tz* on import in mod_wsgi / ASP, but it works fine otherwise.
+ If `!psycopg2` is installed in an egg_ (e.g. because installed by
+ :program:`easy_install`), the user running the program may be unable to
+ write in the `eggs cache`__. Set the env variable
+ :envvar:`PYTHON_EGG_CACHE` to a writable directory. With modwsgi you can
+ use the WSGIPythonEggs__ directive.
+
+ .. _egg: http://peak.telecommunity.com/DevCenter/PythonEggs
+ .. __: https://stackoverflow.com/questions/2192323/what-is-the-python-egg-cache-python-egg-cache
+ .. __: https://modwsgi.readthedocs.io/en/develop/configuration-directives/WSGIPythonEggs.html
diff --git a/doc/src/index.rst b/doc/src/index.rst
new file mode 100644
index 0000000..580d44a
--- /dev/null
+++ b/doc/src/index.rst
@@ -0,0 +1,70 @@
+=================================================
+Psycopg -- PostgreSQL database adapter for Python
+=================================================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+Psycopg_ is the most popular PostgreSQL_ database adapter for the Python_
+programming language. Its main features are the complete implementation of
+the Python |DBAPI|_ specification and the thread safety (several threads can
+share the same connection). It was designed for heavily multi-threaded
+applications that create and destroy lots of cursors and make a large number
+of concurrent :sql:`INSERT`\s or :sql:`UPDATE`\s.
+
+Psycopg 2 is mostly implemented in C as a libpq_ wrapper, resulting in being
+both efficient and secure. It features client-side and :ref:`server-side
+<server-side-cursors>` cursors, :ref:`asynchronous communication
+<async-support>` and :ref:`notifications <async-notify>`, :ref:`COPY <copy>`
+support. Many Python types are supported out-of-the-box and :ref:`adapted to
+matching PostgreSQL data types <python-types-adaptation>`; adaptation can be
+extended and customized thanks to a flexible :ref:`objects adaptation system
+<adapting-new-types>`.
+
+Psycopg 2 is both Unicode and Python 3 friendly.
+
+
+.. _Psycopg: https://psycopg.org/
+.. _PostgreSQL: https://www.postgresql.org/
+.. _Python: https://www.python.org/
+.. _libpq: https://www.postgresql.org/docs/current/static/libpq.html
+
+
+.. rubric:: Contents
+
+.. toctree::
+ :maxdepth: 2
+
+ install
+ usage
+ module
+ connection
+ cursor
+ advanced
+ extensions
+ extras
+ errors
+ sql
+ tz
+ pool
+ errorcodes
+ faq
+ news
+ license
+
+
+.. ifconfig:: builder != 'text'
+
+ .. rubric:: Indices and tables
+
+ * :ref:`genindex`
+ * :ref:`modindex`
+ * :ref:`search`
+
+
+.. ifconfig:: todo_include_todos
+
+ .. note::
+
+ **To Do items in the documentation**
+
+ .. todolist::
diff --git a/doc/src/install.rst b/doc/src/install.rst
new file mode 100644
index 0000000..2eb66c8
--- /dev/null
+++ b/doc/src/install.rst
@@ -0,0 +1,357 @@
+.. _installation:
+
+Installation
+============
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+Psycopg is a PostgreSQL_ adapter for the Python_ programming language. It is a
+wrapper for the libpq_, the official PostgreSQL client library.
+
+.. _PostgreSQL: https://www.postgresql.org/
+.. _Python: https://www.python.org/
+
+
+.. index::
+ single: Install; from PyPI
+ single: Install; wheel
+ single: Wheel
+
+.. _binary-packages:
+
+Quick Install
+-------------
+
+For most operating systems, the quickest way to install Psycopg is using the
+wheel_ package available on PyPI_:
+
+.. code-block:: console
+
+ $ pip install psycopg2-binary
+
+This will install a pre-compiled binary version of the module which does not
+require the build or runtime prerequisites described below. Make sure to use
+an up-to-date version of :program:`pip` (you can upgrade it using something
+like ``pip install -U pip``).
+
+You may then import the ``psycopg2`` package, as usual:
+
+.. code-block:: python
+
+ import psycopg2
+
+ # Connect to your postgres DB
+ conn = psycopg2.connect("dbname=test user=postgres")
+
+ # Open a cursor to perform database operations
+ cur = conn.cursor()
+
+ # Execute a query
+ cur.execute("SELECT * FROM my_data")
+
+ # Retrieve query results
+ records = cur.fetchall()
+
+.. _PyPI: https://pypi.org/project/psycopg2-binary/
+.. _wheel: https://pythonwheels.com/
+
+
+psycopg vs psycopg-binary
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``psycopg2-binary`` package is meant for beginners to start playing
+with Python and PostgreSQL without the need to meet the build
+requirements.
+
+If you are the maintainer of a published package depending on `!psycopg2`
+you shouldn't use ``psycopg2-binary`` as a module dependency. **For
+production use you are advised to use the source distribution.**
+
+The binary packages come with their own versions of a few C libraries,
+among which ``libpq`` and ``libssl``, which will be used regardless of other
+libraries available on the client: upgrading the system libraries will not
+upgrade the libraries used by `!psycopg2`. Please build `!psycopg2` from
+source if you want to maintain binary upgradeability.
+
+.. warning::
+
+ The `!psycopg2` wheel package comes packaged, among the others, with its
+ own ``libssl`` binary. This may create conflicts with other extension
+ modules binding with ``libssl`` as well, for instance with the Python
+ `ssl` module: in some cases, under concurrency, the interaction between
+ the two libraries may result in a segfault. In case of doubts you are
+ advised to use a package built from source.
+
+
+.. index::
+ single: Install; disable wheel
+ single: Wheel; disable
+
+.. _disable-wheel:
+
+Change in binary packages between Psycopg 2.7 and 2.8
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In version 2.7.x, :command:`pip install psycopg2` would have tried to install
+automatically the binary package of Psycopg. Because of concurrency problems
+binary packages have displayed, ``psycopg2-binary`` has become a separate
+package, and from 2.8 it has become the only way to install the binary
+package.
+
+If you are using Psycopg 2.7 and you want to disable the use of wheel binary
+packages, relying on the system libraries available on your client, you
+can use the :command:`pip` |--no-binary option|__, e.g.:
+
+.. code-block:: console
+
+ $ pip install --no-binary :all: psycopg2
+
+.. |--no-binary option| replace:: ``--no-binary`` option
+.. __: https://pip.pypa.io/en/stable/reference/pip_install/#install-no-binary
+
+which can be specified in your :file:`requirements.txt` files too, e.g. use:
+
+.. code-block:: none
+
+ psycopg2>=2.7,<2.8 --no-binary psycopg2
+
+to use the last bugfix release of the `!psycopg2` 2.7 package, specifying to
+always compile it from source. Of course in this case you will have to meet
+the :ref:`build prerequisites <build-prerequisites>`.
+
+
+.. index::
+ single: Prerequisites
+
+Prerequisites
+-------------
+
+The current `!psycopg2` implementation supports:
+
+..
+ NOTE: keep consistent with setup.py and the /features/ page.
+
+- Python versions from 3.6 to 3.9
+- PostgreSQL server versions from 7.4 to 13
+- PostgreSQL client library version from 9.1
+
+
+
+.. _build-prerequisites:
+
+Build prerequisites
+^^^^^^^^^^^^^^^^^^^
+
+The build prerequisites are to be met in order to install Psycopg from source
+code, from a source distribution package, GitHub_ or from PyPI.
+
+.. _GitHub: https://github.com/psycopg/psycopg2
+
+Psycopg is a C wrapper around the libpq_ PostgreSQL client library. To install
+it from sources you will need:
+
+- A C compiler.
+
+- The Python header files. They are usually installed in a package such as
+ **python-dev** or **python3-dev**. A message such as *error: Python.h: No
+ such file or directory* is an indication that the Python headers are
+ missing.
+
+- The libpq header files. They are usually installed in a package such as
+ **libpq-dev**. If you get an *error: libpq-fe.h: No such file or directory*
+ you are missing them.
+
+- The :program:`pg_config` program: it is usually installed by the
+ **libpq-dev** package but sometimes it is not in a :envvar:`PATH` directory.
+ Having it in the :envvar:`PATH` greatly streamlines the installation, so try
+ running ``pg_config --version``: if it returns an error or an unexpected
+ version number then locate the directory containing the :program:`pg_config`
+ shipped with the right libpq version (usually
+ ``/usr/lib/postgresql/X.Y/bin/``) and add it to the :envvar:`PATH`:
+
+ .. code-block:: console
+
+ $ export PATH=/usr/lib/postgresql/X.Y/bin/:$PATH
+
+ You only need :program:`pg_config` to compile `!psycopg2`, not for its
+ regular usage.
+
+Once everything is in place it's just a matter of running the standard:
+
+.. code-block:: console
+
+ $ pip install psycopg2
+
+or, from the directory containing the source code:
+
+.. code-block:: console
+
+ $ python setup.py build
+ $ python setup.py install
+
+
+Runtime requirements
+^^^^^^^^^^^^^^^^^^^^
+
+Unless you compile `!psycopg2` as a static library, or you install it from a
+self-contained wheel package, it will need the libpq_ library at runtime
+(usually distributed in a ``libpq.so`` or ``libpq.dll`` file). `!psycopg2`
+relies on the host OS to find the library: if the library is installed in a
+standard location there is usually no problem; if the library is in a
+non-standard location you will have to somehow tell Psycopg how to find it,
+which is OS-dependent (for instance setting a suitable
+:envvar:`LD_LIBRARY_PATH` on Linux).
+
+.. note::
+
+ The libpq header files used to compile `!psycopg2` should match the
+ version of the library linked at runtime. If you get errors about missing
+ or mismatching libraries when importing `!psycopg2` check (e.g. using
+ :program:`ldd`) if the module ``psycopg2/_psycopg.so`` is linked to the
+ right ``libpq.so``.
+
+.. note::
+
+ Whatever version of libpq `!psycopg2` is compiled with, it will be
+ possible to connect to PostgreSQL servers of any supported version: just
+ install the most recent libpq version or the most practical, without
+ trying to match it to the version of the PostgreSQL server you will have
+ to connect to.
+
+
+.. index::
+ single: setup.py
+ single: setup.cfg
+
+Non-standard builds
+-------------------
+
+If you have less standard requirements such as:
+
+- creating a :ref:`debug build <debug-build>`,
+- using :program:`pg_config` not in the :envvar:`PATH`,
+
+then take a look at the ``setup.cfg`` file.
+
+Some of the options available in ``setup.cfg`` are also available as command
+line arguments of the ``build_ext`` sub-command. For instance you can specify
+an alternate :program:`pg_config` location using:
+
+.. code-block:: console
+
+ $ python setup.py build_ext --pg-config /path/to/pg_config build
+
+Use ``python setup.py build_ext --help`` to get a list of the options
+supported.
+
+
+.. index::
+ single: debug
+ single: PSYCOPG_DEBUG
+
+.. _debug-build:
+
+Creating a debug build
+^^^^^^^^^^^^^^^^^^^^^^
+
+In case of problems, Psycopg can be configured to emit detailed debug
+messages, which can be very useful for diagnostics and to report a bug. In
+order to create a debug package:
+
+- `Download`__ and unpack the Psycopg *source package* (the ``.tar.gz``
+ package).
+
+- Edit the ``setup.cfg`` file adding the ``PSYCOPG_DEBUG`` flag to the
+ ``define`` option.
+
+- :ref:`Compile and install <build-prerequisites>` the package.
+
+- Set the :envvar:`PSYCOPG_DEBUG` environment variable:
+
+.. code-block:: console
+
+ $ export PSYCOPG_DEBUG=1
+
+- Run your program (making sure that the `!psycopg2` package imported is the
+ one you just compiled and not e.g. the system one): you will have a copious
+ stream of informations printed on stderr.
+
+.. __: https://pypi.org/project/psycopg2/#files
+
+
+Non-standard Python Implementation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The `psycopg2` package is the current mature implementation of the adapter: it
+is a C extension and as such it is only compatible with CPython_. If you want
+to use Psycopg on a different Python implementation (PyPy, Jython, IronPython)
+there are a couple of alternatives:
+
+- a `Ctypes port`__, but it is not as mature as the C implementation yet
+ and it is not as feature-complete;
+
+- a `CFFI port`__ which is currently more used and reported more efficient on
+ PyPy, but please be careful of its version numbers because they are not
+ aligned to the official psycopg2 ones and some features may differ.
+
+.. _PostgreSQL: https://www.postgresql.org/
+.. _Python: https://www.python.org/
+.. _libpq: https://www.postgresql.org/docs/current/static/libpq.html
+.. _CPython: https://en.wikipedia.org/wiki/CPython
+.. _Ctypes: https://docs.python.org/library/ctypes.html
+.. __: https://github.com/mvantellingen/psycopg2-ctypes
+.. __: https://github.com/chtd/psycopg2cffi
+
+
+.. index::
+ single: tests
+
+.. _test-suite:
+
+Running the test suite
+----------------------
+
+Once `!psycopg2` is installed you can run the test suite to verify it is
+working correctly. From the source directory, you can run:
+
+.. code-block:: console
+
+ $ python -c "import tests; tests.unittest.main(defaultTest='tests.test_suite')" --verbose
+
+The tests run against a database called ``psycopg2_test`` on UNIX socket and
+the standard port. You can configure a different database to run the test by
+setting the environment variables:
+
+- :envvar:`PSYCOPG2_TESTDB`
+- :envvar:`PSYCOPG2_TESTDB_HOST`
+- :envvar:`PSYCOPG2_TESTDB_PORT`
+- :envvar:`PSYCOPG2_TESTDB_USER`
+
+The database should already exist before running the tests.
+
+
+.. _other-problems:
+
+If you still have problems
+--------------------------
+
+Try the following. *In order:*
+
+- Read again the :ref:`build-prerequisites`.
+
+- Read the :ref:`FAQ <faq-compile>`.
+
+- Google for `!psycopg2` *your error message*. Especially useful the week
+ after the release of a new OS X version.
+
+- Write to the `Mailing List`_.
+
+- If you think that you have discovered a bug, test failure or missing feature
+ please raise a ticket in the `bug tracker`_.
+
+- Complain on your blog or on Twitter that `!psycopg2` is the worst package
+ ever and about the quality time you have wasted figuring out the correct
+ :envvar:`ARCHFLAGS`. Especially useful from the Starbucks near you.
+
+.. _mailing list: https://www.postgresql.org/list/psycopg/
+.. _bug tracker: https://github.com/psycopg/psycopg2/issues
diff --git a/doc/src/license.rst b/doc/src/license.rst
new file mode 100644
index 0000000..53a4e72
--- /dev/null
+++ b/doc/src/license.rst
@@ -0,0 +1,7 @@
+.. index::
+ single: License
+
+License
+=======
+
+.. include:: ../../LICENSE
diff --git a/doc/src/module.rst b/doc/src/module.rst
new file mode 100644
index 0000000..f17f3ae
--- /dev/null
+++ b/doc/src/module.rst
@@ -0,0 +1,388 @@
+The `psycopg2` module content
+==================================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+.. module:: psycopg2
+
+The module interface respects the standard defined in the |DBAPI|_.
+
+.. index::
+ single: Connection string
+ double: Connection; Parameters
+ single: Username; Connection
+ single: Password; Connection
+ single: Host; Connection
+ single: Port; Connection
+ single: DSN (Database Source Name)
+
+.. function::
+ connect(dsn=None, connection_factory=None, cursor_factory=None, async=False, \*\*kwargs)
+
+ Create a new database session and return a new `connection` object.
+
+ The connection parameters can be specified as a `libpq connection
+ string`__ using the *dsn* parameter::
+
+ conn = psycopg2.connect("dbname=test user=postgres password=secret")
+
+ or using a set of keyword arguments::
+
+ conn = psycopg2.connect(dbname="test", user="postgres", password="secret")
+
+ or using a mix of both: if the same parameter name is specified in both
+ sources, the *kwargs* value will have precedence over the *dsn* value.
+ Note that either the *dsn* or at least one connection-related keyword
+ argument is required.
+
+ The basic connection parameters are:
+
+ - `!dbname` -- the database name (`!database` is a deprecated alias)
+ - `!user` -- user name used to authenticate
+ - `!password` -- password used to authenticate
+ - `!host` -- database host address (defaults to UNIX socket if not provided)
+ - `!port` -- connection port number (defaults to 5432 if not provided)
+
+ Any other connection parameter supported by the client library/server can
+ be passed either in the connection string or as a keyword. The PostgreSQL
+ documentation contains the complete list of the `supported parameters`__.
+ Also note that the same parameters can be passed to the client library
+ using `environment variables`__.
+
+ .. __:
+ .. _connstring: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
+ .. __:
+ .. _connparams: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+ .. __:
+ .. _connenvvars: https://www.postgresql.org/docs/current/static/libpq-envars.html
+
+ Using the *connection_factory* parameter a different class or
+ connections factory can be specified. It should be a callable object
+ taking a *dsn* string argument. See :ref:`subclassing-connection` for
+ details. If a *cursor_factory* is specified, the connection's
+ `~connection.cursor_factory` is set to it. If you only need customized
+ cursors you can use this parameter instead of subclassing a connection.
+
+ Using *async*\=\ `!True` an asynchronous connection will be created: see
+ :ref:`async-support` to know about advantages and limitations. *async_* is
+ a valid alias for the Python version where ``async`` is a keyword.
+
+ .. versionchanged:: 2.4.3
+ any keyword argument is passed to the connection. Previously only the
+ basic parameters (plus `!sslmode`) were supported as keywords.
+
+ .. versionchanged:: 2.5
+ added the *cursor_factory* parameter.
+
+ .. versionchanged:: 2.7
+ both *dsn* and keyword arguments can be specified.
+
+ .. versionchanged:: 2.7
+ added *async_* alias.
+
+ .. seealso::
+
+ - `~psycopg2.extensions.parse_dsn`
+ - libpq `connection string syntax`__
+ - libpq supported `connection parameters`__
+ - libpq supported `environment variables`__
+
+ .. __: connstring_
+ .. __: connparams_
+ .. __: connenvvars_
+
+ .. extension::
+
+ The non-connection-related keyword parameters are Psycopg extensions
+ to the |DBAPI|_.
+
+.. data:: apilevel
+
+ String constant stating the supported DB API level. For `psycopg2` it is
+ ``2.0``.
+
+.. data:: threadsafety
+
+ Integer constant stating the level of thread safety the interface
+ supports. For `psycopg2` it is ``2``, i.e. threads can share the module
+ and the connection. See :ref:`thread-safety` for details.
+
+.. data:: paramstyle
+
+ String constant stating the type of parameter marker formatting expected
+ by the interface. For `psycopg2` it is ``pyformat``. See also
+ :ref:`query-parameters`.
+
+.. data:: __libpq_version__
+
+ Integer constant reporting the version of the ``libpq`` library this
+ ``psycopg2`` module was compiled with (in the same format of
+ `~psycopg2.extensions.ConnectionInfo.server_version`). If this value is
+ greater than or equal to ``90100`` then you may query the version of the
+ actually loaded library using the `~psycopg2.extensions.libpq_version()`
+ function.
+
+
+.. index::
+ single: Exceptions; DB API
+
+.. _dbapi-exceptions:
+
+Exceptions
+----------
+
+In compliance with the |DBAPI|_, the module makes information about errors
+available through the following exceptions:
+
+.. exception:: Warning
+
+ Exception raised for important warnings like data truncations while
+ inserting, etc. It is a subclass of the Python `StandardError`
+ (`Exception` on Python 3).
+
+.. exception:: Error
+
+ Exception that is the base class of all other error exceptions. You can
+ use this to catch all errors with one single `!except` statement. Warnings
+ are not considered errors and thus do not use this class as a base. It
+ is a subclass of the Python `StandardError` (`Exception` on Python 3).
+
+ .. attribute:: pgerror
+
+ String representing the error message returned by the backend,
+ `!None` if not available.
+
+ .. attribute:: pgcode
+
+ String representing the error code returned by the backend, `!None`
+ if not available. The `~psycopg2.errorcodes` module contains
+ symbolic constants representing PostgreSQL error codes.
+
+ .. doctest::
+ :options: +NORMALIZE_WHITESPACE
+
+ >>> try:
+ ... cur.execute("SELECT * FROM barf")
+ ... except psycopg2.Error as e:
+ ... pass
+
+ >>> e.pgcode
+ '42P01'
+ >>> print(e.pgerror)
+ ERROR: relation "barf" does not exist
+ LINE 1: SELECT * FROM barf
+ ^
+
+ .. attribute:: cursor
+
+ The cursor the exception was raised from; `None` if not applicable.
+
+ .. attribute:: diag
+
+ A `~psycopg2.extensions.Diagnostics` object containing further
+ information about the error. ::
+
+ >>> try:
+ ... cur.execute("SELECT * FROM barf")
+ ... except psycopg2.Error as e:
+ ... pass
+
+ >>> e.diag.severity
+ 'ERROR'
+ >>> e.diag.message_primary
+ 'relation "barf" does not exist'
+
+ .. versionadded:: 2.5
+
+ .. extension::
+
+ The `~Error.pgerror`, `~Error.pgcode`, `~Error.cursor`, and
+ `~Error.diag` attributes are Psycopg extensions.
+
+
+.. exception:: InterfaceError
+
+ Exception raised for errors that are related to the database interface
+ rather than the database itself. It is a subclass of `Error`.
+
+.. exception:: DatabaseError
+
+ Exception raised for errors that are related to the database. It is a
+ subclass of `Error`.
+
+.. exception:: DataError
+
+ Exception raised for errors that are due to problems with the processed
+ data like division by zero, numeric value out of range, etc. It is a
+ subclass of `DatabaseError`.
+
+.. exception:: OperationalError
+
+ Exception raised for errors that are related to the database's operation
+ and not necessarily under the control of the programmer, e.g. an
+ unexpected disconnect occurs, the data source name is not found, a
+ transaction could not be processed, a memory allocation error occurred
+ during processing, etc. It is a subclass of `DatabaseError`.
+
+.. exception:: IntegrityError
+
+ Exception raised when the relational integrity of the database is
+ affected, e.g. a foreign key check fails. It is a subclass of
+ `DatabaseError`.
+
+.. exception:: InternalError
+
+ Exception raised when the database encounters an internal error, e.g. the
+ cursor is not valid anymore, the transaction is out of sync, etc. It is a
+ subclass of `DatabaseError`.
+
+.. exception:: ProgrammingError
+
+ Exception raised for programming errors, e.g. table not found or already
+ exists, syntax error in the SQL statement, wrong number of parameters
+ specified, etc. It is a subclass of `DatabaseError`.
+
+.. exception:: NotSupportedError
+
+ Exception raised in case a method or database API was used which is not
+ supported by the database, e.g. requesting a `!rollback()` on a
+ connection that does not support transactions or has transactions turned
+ off. It is a subclass of `DatabaseError`.
+
+
+.. extension::
+
+ Psycopg actually raises a different exception for each :sql:`SQLSTATE`
+ error returned by the database: the classes are available in the
+ `psycopg2.errors` module. Every exception class is a subclass of one of
+ the exception classes defined here though, so they don't need to be
+ trapped specifically: trapping `!Error` or `!DatabaseError` is usually
+ what needed to write a generic error handler; trapping a specific error
+ such as `!NotNullViolation` can be useful to write specific exception
+ handlers.
+
+
+This is the exception inheritance layout:
+
+.. parsed-literal::
+
+ `!StandardError`
+ \|__ `Warning`
+ \|__ `Error`
+ \|__ `InterfaceError`
+ \|__ `DatabaseError`
+ \|__ `DataError`
+ \|__ `OperationalError`
+ \|__ `IntegrityError`
+ \|__ `InternalError`
+ \|__ `ProgrammingError`
+ \|__ `NotSupportedError`
+
+
+
+.. _type-objects-and-constructors:
+
+Type Objects and Constructors
+-----------------------------
+
+.. note::
+
+ This section is mostly copied verbatim from the |DBAPI|_
+ specification. While these objects are exposed in compliance to the
+ DB API, Psycopg offers very accurate tools to convert data between Python
+ and PostgreSQL formats. See :ref:`adapting-new-types` and
+ :ref:`type-casting-from-sql-to-python`.
+
+Many databases need to have the input in a particular format for
+binding to an operation's input parameters. For example, if an
+input is destined for a DATE column, then it must be bound to the
+database in a particular string format. Similar problems exist
+for "Row ID" columns or large binary items (e.g. blobs or RAW
+columns). This presents problems for Python since the parameters
+to the .execute*() method are untyped. When the database module
+sees a Python string object, it doesn't know if it should be bound
+as a simple CHAR column, as a raw BINARY item, or as a DATE.
+
+To overcome this problem, a module must provide the constructors
+defined below to create objects that can hold special values.
+When passed to the cursor methods, the module can then detect the
+proper type of the input parameter and bind it accordingly.
+
+A Cursor Object's description attribute returns information about
+each of the result columns of a query. The type_code must compare
+equal to one of Type Objects defined below. Type Objects may be
+equal to more than one type code (e.g. DATETIME could be equal to
+the type codes for date, time and timestamp columns; see the
+Implementation Hints below for details).
+
+The module exports the following constructors and singletons:
+
+.. function:: Date(year,month,day)
+
+ This function constructs an object holding a date value.
+
+.. function:: Time(hour,minute,second)
+
+ This function constructs an object holding a time value.
+
+.. function:: Timestamp(year,month,day,hour,minute,second)
+
+ This function constructs an object holding a time stamp value.
+
+.. function:: DateFromTicks(ticks)
+
+ This function constructs an object holding a date value from the given
+ ticks value (number of seconds since the epoch; see the documentation of
+ the standard Python time module for details).
+
+.. function:: TimeFromTicks(ticks)
+
+ This function constructs an object holding a time value from the given
+ ticks value (number of seconds since the epoch; see the documentation of
+ the standard Python time module for details).
+
+.. function:: TimestampFromTicks(ticks)
+
+ This function constructs an object holding a time stamp value from the
+ given ticks value (number of seconds since the epoch; see the
+ documentation of the standard Python time module for details).
+
+.. function:: Binary(string)
+
+ This function constructs an object capable of holding a binary (long)
+ string value.
+
+.. note::
+
+ All the adapters returned by the module level factories (`!Binary`,
+ `!Date`, `!Time`, `!Timestamp` and the `!*FromTicks` variants) expose the
+ wrapped object (a regular Python object such as `!datetime`) in an
+ `!adapted` attribute.
+
+.. data:: STRING
+
+ This type object is used to describe columns in a database that are
+ string-based (e.g. CHAR).
+
+.. data:: BINARY
+
+ This type object is used to describe (long) binary columns in a database
+ (e.g. LONG, RAW, BLOBs).
+
+.. data:: NUMBER
+
+ This type object is used to describe numeric columns in a database.
+
+.. data:: DATETIME
+
+ This type object is used to describe date/time columns in a database.
+
+.. data:: ROWID
+
+ This type object is used to describe the "Row ID" column in a database.
+
+
+.. testcode::
+ :hide:
+
+ conn.rollback()
diff --git a/doc/src/news.rst b/doc/src/news.rst
new file mode 100644
index 0000000..053d646
--- /dev/null
+++ b/doc/src/news.rst
@@ -0,0 +1,8 @@
+.. index::
+ single: Release notes
+ single: News
+
+Release notes
+=============
+
+.. include:: ../../NEWS
diff --git a/doc/src/pool.rst b/doc/src/pool.rst
new file mode 100644
index 0000000..95f4e23
--- /dev/null
+++ b/doc/src/pool.rst
@@ -0,0 +1,60 @@
+`psycopg2.pool` -- Connections pooling
+======================================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+.. index::
+ pair: Connection; Pooling
+
+.. module:: psycopg2.pool
+
+Creating new PostgreSQL connections can be an expensive operation. This
+module offers a few pure Python classes implementing simple connection pooling
+directly in the client application.
+
+.. class:: AbstractConnectionPool(minconn, maxconn, \*args, \*\*kwargs)
+
+ Base class implementing generic key-based pooling code.
+
+ New *minconn* connections are created automatically. The pool will support
+ a maximum of about *maxconn* connections. *\*args* and *\*\*kwargs* are
+ passed to the `~psycopg2.connect()` function.
+
+ The following methods are expected to be implemented by subclasses:
+
+ .. method:: getconn(key=None)
+
+ Get a free connection from the pool.
+
+ The *key* parameter is optional: if used, the connection will be
+ associated to the key and calling `!getconn()` with the same key again
+ will return the same connection.
+
+ .. method:: putconn(conn, key=None, close=False)
+
+ Put away a connection.
+
+ If *close* is `!True`, discard the connection from the pool.
+ *key* should be used consistently with `getconn()`.
+
+ .. method:: closeall
+
+ Close all the connections handled by the pool.
+
+ Note that all the connections are closed, including any
+ that may currently be in use by the application.
+
+
+The following classes are `AbstractConnectionPool` subclasses ready to
+be used.
+
+.. autoclass:: SimpleConnectionPool
+
+ .. note:: This pool class is useful only for single-threaded applications.
+
+
+.. index:: Multithread; Connection pooling
+
+.. autoclass:: ThreadedConnectionPool
+
+ .. note:: This pool class can be safely used in multi-threaded applications.
diff --git a/doc/src/sql.rst b/doc/src/sql.rst
new file mode 100644
index 0000000..c6507e0
--- /dev/null
+++ b/doc/src/sql.rst
@@ -0,0 +1,147 @@
+`psycopg2.sql` -- SQL string composition
+========================================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+.. module:: psycopg2.sql
+
+.. versionadded:: 2.7
+
+The module contains objects and functions useful to generate SQL dynamically,
+in a convenient and safe way. SQL identifiers (e.g. names of tables and
+fields) cannot be passed to the `~cursor.execute()` method like query
+arguments::
+
+ # This will not work
+ table_name = 'my_table'
+ cur.execute("insert into %s values (%s, %s)", [table_name, 10, 20])
+
+The SQL query should be composed before the arguments are merged, for
+instance::
+
+ # This works, but it is not optimal
+ table_name = 'my_table'
+ cur.execute(
+ "insert into %s values (%%s, %%s)" % table_name,
+ [10, 20])
+
+This sort of works, but it is an accident waiting to happen: the table name
+may be an invalid SQL literal and need quoting; even more serious is the
+security problem in case the table name comes from an untrusted source. The
+name should be escaped using `~psycopg2.extensions.quote_ident()`::
+
+ # This works, but it is not optimal
+ table_name = 'my_table'
+ cur.execute(
+ "insert into %s values (%%s, %%s)" % ext.quote_ident(table_name),
+ [10, 20])
+
+This is now safe, but it is somewhat ad hoc. In case, for some reason, it is
+necessary to include a value in the query string (as opposed to passing it as
+a parameter)
+the merging rule is still different (`~psycopg2.extensions.adapt()` should be
+used...). It is also still relatively dangerous: if `!quote_ident()` is
+forgotten somewhere, the program will usually work, but will eventually crash
+in the presence of a table or field name containing characters to escape,
+or will present a potentially exploitable weakness.
+
+The objects exposed by the `!psycopg2.sql` module allow generating SQL
+statements on the fly, separating clearly the variable parts of the statement
+from the query parameters::
+
+ from psycopg2 import sql
+
+ cur.execute(
+ sql.SQL("insert into {} values (%s, %s)")
+ .format(sql.Identifier('my_table')),
+ [10, 20])
+
+
+Module usage
+------------
+
+Usually you should express the template of your query as an `SQL` instance
+with `{}`\-style placeholders and use `~SQL.format()` to merge the variable
+parts into them, all of which must be `Composable` subclasses. You can still
+have `%s`\ -style placeholders in your query and pass values to
+`~cursor.execute()`: such value placeholders will be untouched by
+`!format()`::
+
+ query = sql.SQL("select {field} from {table} where {pkey} = %s").format(
+ field=sql.Identifier('my_name'),
+ table=sql.Identifier('some_table'),
+ pkey=sql.Identifier('id'))
+
+The resulting object is meant to be passed directly to cursor methods such as
+`~cursor.execute()`, `~cursor.executemany()`, `~cursor.copy_expert()`, but can
+also be used to compose a query as a Python string, using the
+`~Composable.as_string()` method::
+
+ cur.execute(query, (42,))
+
+If part of your query is a variable sequence of arguments, such as a
+comma-separated list of field names, you can use the `SQL.join()` method to
+pass them to the query::
+
+ query = sql.SQL("select {fields} from {table}").format(
+ fields=sql.SQL(',').join([
+ sql.Identifier('field1'),
+ sql.Identifier('field2'),
+ sql.Identifier('field3'),
+ ]),
+ table=sql.Identifier('some_table'))
+
+
+`!sql` objects
+--------------
+
+The `!sql` objects are in the following inheritance hierarchy:
+
+| `Composable`: the base class exposing the common interface
+| ``|__`` `SQL`: a literal snippet of an SQL query
+| ``|__`` `Identifier`: a PostgreSQL identifier or dot-separated sequence of identifiers
+| ``|__`` `Literal`: a value hardcoded into a query
+| ``|__`` `Placeholder`: a `%s`\ -style placeholder whose value will be added later e.g. by `~cursor.execute()`
+| ``|__`` `Composed`: a sequence of `!Composable` instances.
+
+
+.. autoclass:: Composable
+
+ .. automethod:: as_string
+
+
+.. autoclass:: SQL
+
+ .. autoattribute:: string
+
+ .. automethod:: format
+
+ .. automethod:: join
+
+
+.. autoclass:: Identifier
+
+ .. versionchanged:: 2.8
+ added support for multiple strings.
+
+ .. autoattribute:: strings
+
+ .. versionadded:: 2.8
+ previous versions only had a `!string` attribute. The attribute
+ still exists but is deprecated and will only work if the
+ `!Identifier` wraps a single string.
+
+.. autoclass:: Literal
+
+ .. autoattribute:: wrapped
+
+
+.. autoclass:: Placeholder
+
+ .. autoattribute:: name
+
+
+.. autoclass:: Composed
+
+ .. autoattribute:: seq
+
+ .. automethod:: join
diff --git a/doc/src/tools/lib/dbapi_extension.py b/doc/src/tools/lib/dbapi_extension.py
new file mode 100755
index 0000000..7fc776a
--- /dev/null
+++ b/doc/src/tools/lib/dbapi_extension.py
@@ -0,0 +1,50 @@
+"""
+ extension
+ ~~~~~~~~~
+
+ A directive to create a box warning that a certain bit of Psycopg is an
+ extension to the DBAPI 2.0.
+
+ :copyright: Copyright 2010 by Daniele Varrazzo.
+"""
+
+from docutils import nodes
+
+from sphinx.locale import _
+from docutils.parsers.rst import Directive
+
+class extension_node(nodes.Admonition, nodes.Element): pass
+
+
+class Extension(Directive):
+ """
+ An extension entry, displayed as an admonition.
+ """
+
+ has_content = True
+ required_arguments = 0
+ optional_arguments = 0
+ final_argument_whitespace = False
+ option_spec = {}
+
+ def run(self):
+ node = extension_node('\n'.join(self.content))
+ node += nodes.title(_('DB API extension'), _('DB API extension'))
+ self.state.nested_parse(self.content, self.content_offset, node)
+ node['classes'].append('dbapi-extension')
+ return [node]
+
+
+def visit_extension_node(self, node):
+ self.visit_admonition(node)
+
+def depart_extension_node(self, node):
+ self.depart_admonition(node)
+
+def setup(app):
+ app.add_node(extension_node,
+ html=(visit_extension_node, depart_extension_node),
+ latex=(visit_extension_node, depart_extension_node),
+ text=(visit_extension_node, depart_extension_node))
+
+ app.add_directive('extension', Extension)
diff --git a/doc/src/tools/lib/sql_role.py b/doc/src/tools/lib/sql_role.py
new file mode 100644
index 0000000..1731546
--- /dev/null
+++ b/doc/src/tools/lib/sql_role.py
@@ -0,0 +1,19 @@
+"""
+ sql role
+ ~~~~~~~~
+
+ An interpreted text role to style SQL syntax in Psycopg documentation.
+
+ :copyright: Copyright 2010 by Daniele Varrazzo.
+"""
+
+from docutils import nodes, utils
+from docutils.parsers.rst import roles
+
+def sql_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+ text = utils.unescape(text)
+ options['classes'] = ['sql']
+ return [nodes.literal(rawtext, text, **options)], []
+
+def setup(app):
+ roles.register_local_role('sql', sql_role)
diff --git a/doc/src/tools/lib/ticket_role.py b/doc/src/tools/lib/ticket_role.py
new file mode 100644
index 0000000..8ba87cb
--- /dev/null
+++ b/doc/src/tools/lib/ticket_role.py
@@ -0,0 +1,57 @@
+"""
+ ticket role
+ ~~~~~~~~~~~
+
+ An interpreted text role to link docs to tickets issues.
+
+ :copyright: Copyright 2013 by Daniele Varrazzo.
+"""
+
+import re
+from docutils import nodes, utils
+from docutils.parsers.rst import roles
+
+def ticket_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+ cfg = inliner.document.settings.env.app.config
+ if cfg.ticket_url is None:
+ msg = inliner.reporter.warning(
+ "ticket not configured: please configure ticket_url in conf.py")
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+
+ rv = [nodes.Text(name + ' ')]
+ tokens = re.findall(r'(#?\d+)|([^\d#]+)', text)
+ for ticket, noise in tokens:
+ if ticket:
+ num = int(ticket.replace('#', ''))
+
+ # Push numbers of the older tickets ahead.
+ # We moved the tickets from a different tracker to GitHub and the
+ # latter already had a few ticket numbers taken (as merge
+ # requests).
+ remap_until = cfg.ticket_remap_until
+ remap_offset = cfg.ticket_remap_offset
+ if remap_until and remap_offset:
+ if num <= remap_until:
+ num += remap_offset
+
+ url = cfg.ticket_url % num
+ roles.set_classes(options)
+ node = nodes.reference(ticket, utils.unescape(ticket),
+ refuri=url, **options)
+
+ rv.append(node)
+
+ else:
+ assert noise
+ rv.append(nodes.Text(noise))
+
+ return rv, []
+
+
+def setup(app):
+ app.add_config_value('ticket_url', None, 'env')
+ app.add_config_value('ticket_remap_until', None, 'env')
+ app.add_config_value('ticket_remap_offset', None, 'env')
+ app.add_role('ticket', ticket_role)
+ app.add_role('tickets', ticket_role)
diff --git a/doc/src/tools/make_sqlstate_docs.py b/doc/src/tools/make_sqlstate_docs.py
new file mode 100644
index 0000000..16fd9c9
--- /dev/null
+++ b/doc/src/tools/make_sqlstate_docs.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+"""Create the docs table of the sqlstate errors.
+"""
+
+
+import re
+import sys
+from collections import namedtuple
+
+from psycopg2._psycopg import sqlstate_errors
+
+
+def main():
+ sqlclasses = {}
+ clsfile = sys.argv[1]
+ with open(clsfile) as f:
+ for l in f:
+ m = re.match(r'/\* Class (..) - (.+) \*/', l)
+ if m is not None:
+ sqlclasses[m.group(1)] = m.group(2)
+
+ Line = namedtuple('Line', 'colstate colexc colbase sqlstate')
+
+ lines = [Line('SQLSTATE', 'Exception', 'Base exception', None)]
+ for k in sorted(sqlstate_errors):
+ exc = sqlstate_errors[k]
+ lines.append(Line(
+ f"``{k}``", f"`!{exc.__name__}`",
+ f"`!{get_base_exception(exc).__name__}`", k))
+
+ widths = [max(len(l[c]) for l in lines) for c in range(3)]
+ h = Line(*(['=' * w for w in widths] + [None]))
+ lines.insert(0, h)
+ lines.insert(2, h)
+ lines.append(h)
+
+ h1 = '-' * (sum(widths) + len(widths) - 1)
+ sqlclass = None
+ for l in lines:
+ cls = l.sqlstate[:2] if l.sqlstate else None
+ if cls and cls != sqlclass:
+ print(f"**Class {cls}**: {sqlclasses[cls]}")
+ print(h1)
+ sqlclass = cls
+
+ print("%-*s %-*s %-*s" % (
+ widths[0], l.colstate, widths[1], l.colexc, widths[2], l.colbase))
+
+
+def get_base_exception(exc):
+ for cls in exc.__mro__:
+ if cls.__module__ == 'psycopg2':
+ return cls
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/doc/src/tz.rst b/doc/src/tz.rst
new file mode 100644
index 0000000..c1bd5ff
--- /dev/null
+++ b/doc/src/tz.rst
@@ -0,0 +1,19 @@
+`psycopg2.tz` -- ``tzinfo`` implementations for Psycopg 2
+===============================================================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+.. module:: psycopg2.tz
+
+.. deprecated:: 2.9
+ The module will be dropped in psycopg 2.10. Use `datetime.timezone`
+ instead.
+
+This module holds two different tzinfo implementations that can be used as the
+`tzinfo` argument to `~datetime.datetime` constructors, directly passed to
+Psycopg functions or used to set the `cursor.tzinfo_factory` attribute in
+cursors.
+
+.. autoclass:: psycopg2.tz.FixedOffsetTimezone
+
+.. autoclass:: psycopg2.tz.LocalTimezone
diff --git a/doc/src/usage.rst b/doc/src/usage.rst
new file mode 100644
index 0000000..5bb69e9
--- /dev/null
+++ b/doc/src/usage.rst
@@ -0,0 +1,1106 @@
+.. _usage:
+
+Basic module usage
+==================
+
+.. sectionauthor:: Daniele Varrazzo <daniele.varrazzo@gmail.com>
+
+.. index::
+ pair: Example; Usage
+
+The basic Psycopg usage is common to all the database adapters implementing
+the |DBAPI|_ protocol. Here is an interactive session showing some of the
+basic commands::
+
+ >>> import psycopg2
+
+ # Connect to an existing database
+ >>> conn = psycopg2.connect("dbname=test user=postgres")
+
+ # Open a cursor to perform database operations
+ >>> cur = conn.cursor()
+
+ # Execute a command: this creates a new table
+ >>> cur.execute("CREATE TABLE test (id serial PRIMARY KEY, num integer, data varchar);")
+
+ # Pass data to fill a query placeholders and let Psycopg perform
+ # the correct conversion (no more SQL injections!)
+ >>> cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)",
+ ... (100, "abc'def"))
+
+ # Query the database and obtain data as Python objects
+ >>> cur.execute("SELECT * FROM test;")
+ >>> cur.fetchone()
+ (1, 100, "abc'def")
+
+ # Make the changes to the database persistent
+ >>> conn.commit()
+
+ # Close communication with the database
+ >>> cur.close()
+ >>> conn.close()
+
+
+The main entry points of Psycopg are:
+
+- The function `~psycopg2.connect()` creates a new database session and
+ returns a new `connection` instance.
+
+- The class `connection` encapsulates a database session. It allows you to:
+
+ - create new `cursor` instances using the `~connection.cursor()` method to
+ execute database commands and queries,
+
+ - terminate transactions using the methods `~connection.commit()` or
+ `~connection.rollback()`.
+
+- The class `cursor` allows interaction with the database:
+
+ - send commands to the database using methods such as `~cursor.execute()`
+ and `~cursor.executemany()`,
+
+ - retrieve data from the database :ref:`by iteration <cursor-iterable>` or
+ using methods such as `~cursor.fetchone()`, `~cursor.fetchmany()`,
+ `~cursor.fetchall()`.
+
+
+
+.. index::
+ pair: Query; Parameters
+
+.. _query-parameters:
+
+Passing parameters to SQL queries
+---------------------------------
+
+Psycopg converts Python variables to SQL values using their types: the Python
+type determines the function used to convert the object into a string
+representation suitable for PostgreSQL. Many standard Python types are
+already `adapted to the correct SQL representation`__.
+
+.. __: python-types-adaptation_
+
+Passing parameters to an SQL statement happens in functions such as
+`cursor.execute()` by using ``%s`` placeholders in the SQL statement, and
+passing a sequence of values as the second argument of the function. For
+example the Python function call::
+
+ >>> cur.execute("""
+ ... INSERT INTO some_table (an_int, a_date, a_string)
+ ... VALUES (%s, %s, %s);
+ ... """,
+ ... (10, datetime.date(2005, 11, 18), "O'Reilly"))
+
+is converted into a SQL command similar to:
+
+.. code-block:: sql
+
+ INSERT INTO some_table (an_int, a_date, a_string)
+ VALUES (10, '2005-11-18', 'O''Reilly');
+
+Named arguments are supported too using :samp:`%({name})s` placeholders in the
+query and specifying the values into a mapping. Using named arguments allows
+you to specify the values in any order and to repeat the same value in several
+places in the query::
+
+ >>> cur.execute("""
+ ... INSERT INTO some_table (an_int, a_date, another_date, a_string)
+ ... VALUES (%(int)s, %(date)s, %(date)s, %(str)s);
+ ... """,
+ ... {'int': 10, 'str': "O'Reilly", 'date': datetime.date(2005, 11, 18)})
+
+Using characters ``%``, ``(``, ``)`` in the argument names is not supported.
+
+When parameters are used, in order to include a literal ``%`` in the query you
+can use the ``%%`` string::
+
+ >>> cur.execute("SELECT (%s % 2) = 0 AS even", (10,)) # WRONG
+ >>> cur.execute("SELECT (%s %% 2) = 0 AS even", (10,)) # correct
+
+While the mechanism resembles regular Python strings manipulation, there are a
+few subtle differences you should care about when passing parameters to a
+query.
+
+- The Python string operator ``%`` *must not be used*: the `~cursor.execute()`
+ method accepts a tuple or dictionary of values as second parameter.
+ |sql-warn|__:
+
+ .. |sql-warn| replace:: **Never** use ``%`` or ``+`` to merge values
+ into queries
+
+ .. __: sql-injection_
+
+ >>> cur.execute("INSERT INTO numbers VALUES (%s, %s)" % (10, 20)) # WRONG
+ >>> cur.execute("INSERT INTO numbers VALUES (%s, %s)", (10, 20)) # correct
+
+- For positional variables binding, *the second argument must always be a
+ sequence*, even if it contains a single variable (remember that Python
+ requires a comma to create a single element tuple)::
+
+ >>> cur.execute("INSERT INTO foo VALUES (%s)", "bar") # WRONG
+ >>> cur.execute("INSERT INTO foo VALUES (%s)", ("bar")) # WRONG
+ >>> cur.execute("INSERT INTO foo VALUES (%s)", ("bar",)) # correct
+ >>> cur.execute("INSERT INTO foo VALUES (%s)", ["bar"]) # correct
+
+- The placeholder *must not be quoted*. Psycopg will add quotes where needed::
+
+ >>> cur.execute("INSERT INTO numbers VALUES ('%s')", (10,)) # WRONG
+ >>> cur.execute("INSERT INTO numbers VALUES (%s)", (10,)) # correct
+
+- The variables placeholder *must always be a* ``%s``, even if a different
+ placeholder (such as a ``%d`` for integers or ``%f`` for floats) may look
+ more appropriate::
+
+ >>> cur.execute("INSERT INTO numbers VALUES (%d)", (10,)) # WRONG
+ >>> cur.execute("INSERT INTO numbers VALUES (%s)", (10,)) # correct
+
+- Only query values should be bound via this method: it shouldn't be used to
+ merge table or field names to the query (Psycopg will try quoting the table
+ name as a string value, generating invalid SQL). If you need to generate
+SQL queries dynamically (for instance choosing a table name dynamically)
+ you can use the facilities provided by the `psycopg2.sql` module::
+
+ >>> cur.execute("INSERT INTO %s VALUES (%s)", ('numbers', 10)) # WRONG
+ >>> cur.execute( # correct
+ ... SQL("INSERT INTO {} VALUES (%s)").format(Identifier('numbers')),
+ ... (10,))
+
+
+.. index:: Security, SQL injection
+
+.. _sql-injection:
+
+The problem with the query parameters
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The SQL representation of many data types is often different from their Python
+string representation. The typical example is with single quotes in strings:
+in SQL single quotes are used as string literal delimiters, so the ones
+appearing inside the string itself must be escaped, whereas in Python single
+quotes can be left unescaped if the string is delimited by double quotes.
+
+Because of the difference, sometimes subtle, between the data types
+representations, a naïve approach to query strings composition, such as using
+Python strings concatenation, is a recipe for *terrible* problems::
+
+ >>> SQL = "INSERT INTO authors (name) VALUES ('%s');" # NEVER DO THIS
+ >>> data = ("O'Reilly", )
+ >>> cur.execute(SQL % data) # THIS WILL FAIL MISERABLY
+ ProgrammingError: syntax error at or near "Reilly"
+ LINE 1: INSERT INTO authors (name) VALUES ('O'Reilly')
+ ^
+
+If the variables containing the data to send to the database come from an
+untrusted source (such as a form published on a web site) an attacker could
+easily craft a malformed string, either gaining access to unauthorized data or
+performing destructive operations on the database. This form of attack is
+called `SQL injection`_ and is known to be one of the most widespread forms of
+attack to database servers. Before continuing, please print `this page`__ as a
+memo and hang it onto your desk.
+
+.. _SQL injection: https://en.wikipedia.org/wiki/SQL_injection
+.. __: https://xkcd.com/327/
+
+Psycopg can `automatically convert Python objects to and from SQL
+literals`__: using this feature your code will be more robust and
+reliable. We must stress this point:
+
+.. __: python-types-adaptation_
+
+.. warning::
+
+ Never, **never**, **NEVER** use Python string concatenation (``+``) or
+ string parameters interpolation (``%``) to pass variables to a SQL query
+ string. Not even at gunpoint.
+
+The correct way to pass variables in a SQL command is using the second
+argument of the `~cursor.execute()` method::
+
+ >>> SQL = "INSERT INTO authors (name) VALUES (%s);" # Note: no quotes
+ >>> data = ("O'Reilly", )
+ >>> cur.execute(SQL, data) # Note: no % operator
+
+
+Values containing backslashes and LIKE
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Unlike in Python, the backslash (`\\`) is not used as an escape
+character *except* in patterns used with `LIKE` and `ILIKE` where they
+are needed to escape the `%` and `_` characters.
+
+This can lead to confusing situations::
+
+ >>> path = r'C:\Users\Bobby.Tables'
+ >>> cur.execute('INSERT INTO mytable(path) VALUES (%s)', (path,))
+ >>> cur.execute('SELECT * FROM mytable WHERE path LIKE %s', (path,))
+ >>> cur.fetchall()
+ []
+
+The solution is to specify an `ESCAPE` character of `''` (empty string)
+in your `LIKE` query::
+
+ >>> cur.execute("SELECT * FROM mytable WHERE path LIKE %s ESCAPE ''", (path,))
+
+
+
+.. index::
+ single: Adaptation
+ pair: Objects; Adaptation
+ single: Data types; Adaptation
+
+.. _python-types-adaptation:
+
+Adaptation of Python values to SQL types
+----------------------------------------
+
+Many standard Python types are adapted into SQL and returned as Python
+objects when a query is executed.
+
+The following table shows the default mapping between Python and PostgreSQL
+types:
+
+..
+ TODO: The table is not rendered in text output
+
+.. only:: html
+
+ .. table::
+ :class: data-types
+
+ +--------------------+-------------------------+--------------------------+
+ | Python | PostgreSQL | See also |
+ +====================+=========================+==========================+
+ | `!None` | :sql:`NULL` | :ref:`adapt-consts` |
+ +--------------------+-------------------------+ |
+ | `!bool` | :sql:`bool` | |
+ +--------------------+-------------------------+--------------------------+
+ | `!float` | | :sql:`real` | :ref:`adapt-numbers` |
+ | | | :sql:`double` | |
+ +--------------------+-------------------------+ |
+ | | `!int` | | :sql:`smallint` | |
+ | | `!long` | | :sql:`integer` | |
+ | | | :sql:`bigint` | |
+ +--------------------+-------------------------+ |
+ | `~decimal.Decimal` | :sql:`numeric` | |
+ +--------------------+-------------------------+--------------------------+
+ | | `!str` | | :sql:`varchar` | :ref:`adapt-string` |
+ | | `!unicode` | | :sql:`text` | |
+ +--------------------+-------------------------+--------------------------+
+ | | `buffer` | :sql:`bytea` | :ref:`adapt-binary` |
+ | | `memoryview` | | |
+ | | `bytearray` | | |
+ | | `bytes` | | |
+ | | Buffer protocol | | |
+ +--------------------+-------------------------+--------------------------+
+ | `!date` | :sql:`date` | :ref:`adapt-date` |
+ +--------------------+-------------------------+ |
+ | `!time` | | :sql:`time` | |
+ | | | :sql:`timetz` | |
+ +--------------------+-------------------------+ |
+ | `!datetime` | | :sql:`timestamp` | |
+ | | | :sql:`timestamptz` | |
+ +--------------------+-------------------------+ |
+ | `!timedelta` | :sql:`interval` | |
+ +--------------------+-------------------------+--------------------------+
+ | `!list` | :sql:`ARRAY` | :ref:`adapt-list` |
+ +--------------------+-------------------------+--------------------------+
+ | | `!tuple` | | Composite types | | :ref:`adapt-tuple` |
+ | | `!namedtuple` | | :sql:`IN` syntax | | :ref:`adapt-composite` |
+ +--------------------+-------------------------+--------------------------+
+ | `!dict` | :sql:`hstore` | :ref:`adapt-hstore` |
+ +--------------------+-------------------------+--------------------------+
+ | Psycopg's `!Range` | :sql:`range` | :ref:`adapt-range` |
+ +--------------------+-------------------------+--------------------------+
+ | Anything\ |tm| | :sql:`json` | :ref:`adapt-json` |
+ +--------------------+-------------------------+--------------------------+
+ | `~uuid.UUID` | :sql:`uuid` | :ref:`adapt-uuid` |
+ +--------------------+-------------------------+--------------------------+
+ | `ipaddress` | | :sql:`inet` | :ref:`adapt-network` |
+ | objects | | :sql:`cidr` | |
+ +--------------------+-------------------------+--------------------------+
+
+.. |tm| unicode:: U+2122
+
+The mapping is fairly customizable: see :ref:`adapting-new-types` and
+:ref:`type-casting-from-sql-to-python`. You can also find a few other
+specialized adapters in the `psycopg2.extras` module.
+
+
+.. index::
+ pair: None; Adaptation
+ single: NULL; Adaptation
+ pair: Boolean; Adaptation
+
+.. _adapt-consts:
+
+Constants adaptation
+^^^^^^^^^^^^^^^^^^^^
+
+Python `None` and boolean values `True` and `False` are converted into the
+proper SQL literals::
+
+ >>> cur.mogrify("SELECT %s, %s, %s;", (None, True, False))
+ 'SELECT NULL, true, false;'
+
+
+.. index::
+ single: Adaptation; numbers
+ single: Integer; Adaptation
+ single: Float; Adaptation
+ single: Decimal; Adaptation
+
+.. _adapt-numbers:
+
+Numbers adaptation
+^^^^^^^^^^^^^^^^^^
+
+Python numeric objects `int`, `long`, `float`, `~decimal.Decimal` are
+converted into a PostgreSQL numerical representation::
+
+ >>> cur.mogrify("SELECT %s, %s, %s, %s;", (10, 10L, 10.0, Decimal("10.00")))
+ 'SELECT 10, 10, 10.0, 10.00;'
+
+Reading from the database, integer types are converted into `!int`, floating
+point types are converted into `!float`, :sql:`numeric`\/\ :sql:`decimal` are
+converted into `!Decimal`.
+
+.. note::
+
+ Sometimes you may prefer to receive :sql:`numeric` data as `!float`
+ instead, for performance reason or ease of manipulation: you can configure
+ an adapter to :ref:`cast PostgreSQL numeric to Python float <faq-float>`.
+ This of course may imply a loss of precision.
+
+.. seealso:: `PostgreSQL numeric types
+ <https://www.postgresql.org/docs/current/static/datatype-numeric.html>`__
+
+
+.. index::
+ pair: Strings; Adaptation
+ single: Unicode; Adaptation
+
+.. _adapt-string:
+
+Strings adaptation
+^^^^^^^^^^^^^^^^^^
+
+Python `str` and `unicode` are converted into the SQL string syntax.
+`!unicode` objects (`!str` in Python 3) are encoded in the connection
+`~connection.encoding` before sending to the backend: trying to send a
+character not supported by the encoding will result in an error. Data is
+usually received as `!str` (*i.e.* it is *decoded* on Python 3, left *encoded*
+on Python 2). However it is possible to receive `!unicode` on Python 2 too:
+see :ref:`unicode-handling`.
+
+
+.. index::
+ single: Unicode
+
+.. _unicode-handling:
+
+Unicode handling
+''''''''''''''''
+
+Psycopg can exchange Unicode data with a PostgreSQL database. Python
+`!unicode` objects are automatically *encoded* in the client encoding
+defined on the database connection (the `PostgreSQL encoding`__, available in
+`connection.encoding`, is translated into a `Python encoding`__ using the
+`~psycopg2.extensions.encodings` mapping)::
+
+ >>> print u, type(u)
+ àèìòù€ <type 'unicode'>
+
+ >>> cur.execute("INSERT INTO test (num, data) VALUES (%s,%s);", (74, u))
+
+.. __: https://www.postgresql.org/docs/current/static/multibyte.html
+.. __: https://docs.python.org/library/codecs.html
+
+When reading data from the database, in Python 2 the strings returned are
+usually 8 bit `!str` objects encoded in the database client encoding::
+
+ >>> print conn.encoding
+ UTF8
+
+ >>> cur.execute("SELECT data FROM test WHERE num = 74")
+ >>> x = cur.fetchone()[0]
+ >>> print x, type(x), repr(x)
+ àèìòù€ <type 'str'> '\xc3\xa0\xc3\xa8\xc3\xac\xc3\xb2\xc3\xb9\xe2\x82\xac'
+
+ >>> conn.set_client_encoding('LATIN9')
+
+ >>> cur.execute("SELECT data FROM test WHERE num = 74")
+ >>> x = cur.fetchone()[0]
+ >>> print type(x), repr(x)
+ <type 'str'> '\xe0\xe8\xec\xf2\xf9\xa4'
+
+In Python 3 instead the strings are automatically *decoded* in the connection
+`~connection.encoding`, as the `!str` object can represent Unicode characters.
+In Python 2 you must register a :ref:`typecaster
+<type-casting-from-sql-to-python>` in order to receive `!unicode` objects::
+
+ >>> psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, cur)
+
+ >>> cur.execute("SELECT data FROM test WHERE num = 74")
+ >>> x = cur.fetchone()[0]
+ >>> print x, type(x), repr(x)
+ àèìòù€ <type 'unicode'> u'\xe0\xe8\xec\xf2\xf9\u20ac'
+
+In the above example, the `~psycopg2.extensions.UNICODE` typecaster is
+registered only on the cursor. It is also possible to register typecasters on
+the connection or globally: see the function
+`~psycopg2.extensions.register_type()` and
+:ref:`type-casting-from-sql-to-python` for details.
+
+.. note::
+
+ In Python 2, if you want to uniformly receive all your database input in
+ Unicode, you can register the related typecasters globally as soon as
+ Psycopg is imported::
+
+ import psycopg2.extensions
+ psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
+ psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
+
+ and forget about this story.
+
+.. note::
+
+ In some cases, on Python 3, you may want to receive `!bytes` instead of
+ `!str`, without undergoing to any decoding. This is especially the case if
+ the data in the database is in mixed encoding. The
+    `~psycopg2.extensions.BYTES` caster is what you need::
+
+ import psycopg2.extensions
+ psycopg2.extensions.register_type(psycopg2.extensions.BYTES, conn)
+ psycopg2.extensions.register_type(psycopg2.extensions.BYTESARRAY, conn)
+ cur = conn.cursor()
+ cur.execute("select %s::text", (u"€",))
+ cur.fetchone()[0]
+ b'\xe2\x82\xac'
+
+    .. versionadded:: 2.8
+
+
+.. index::
+ single: Buffer; Adaptation
+ single: bytea; Adaptation
+ single: bytes; Adaptation
+ single: bytearray; Adaptation
+ single: memoryview; Adaptation
+ single: Binary string
+
+.. _adapt-binary:
+
+Binary adaptation
+^^^^^^^^^^^^^^^^^
+
+Python types representing binary objects are converted into PostgreSQL binary
+string syntax, suitable for :sql:`bytea` fields. Such types are `buffer`
+(only available in Python 2), `memoryview`, `bytearray`, and `bytes` (only in
+Python 3: the name is available in Python 2 but it's only an alias for the
+type `!str`). Any object implementing the `Revised Buffer Protocol`__ should
+be usable as binary type. Received data is returned as `!buffer` (in Python 2)
+or `!memoryview` (in Python 3).
+
+.. __: https://www.python.org/dev/peps/pep-3118/
+
+.. versionchanged:: 2.4
+ only strings were supported before.
+
+.. versionchanged:: 2.4.1
+ can parse the 'hex' format from 9.0 servers without relying on the
+ version of the client library.
+
+.. note::
+
+ In Python 2, if you have binary data in a `!str` object, you can pass them
+ to a :sql:`bytea` field using the `psycopg2.Binary` wrapper::
+
+ mypic = open('picture.png', 'rb').read()
+ curs.execute("insert into blobs (file) values (%s)",
+ (psycopg2.Binary(mypic),))
+
+.. warning::
+
+ Since version 9.0 PostgreSQL uses by default `a new "hex" format`__ to
+ emit :sql:`bytea` fields. Starting from Psycopg 2.4.1 the format is
+ correctly supported. If you use a previous version you will need some
+ extra care when receiving bytea from PostgreSQL: you must have at least
+ libpq 9.0 installed on the client or alternatively you can set the
+ `bytea_output`__ configuration parameter to ``escape``, either in the
+ server configuration file or in the client session (using a query such as
+ ``SET bytea_output TO escape;``) before receiving binary data.
+
+ .. __: https://www.postgresql.org/docs/current/static/datatype-binary.html
+ .. __: https://www.postgresql.org/docs/current/static/runtime-config-client.html#GUC-BYTEA-OUTPUT
+
+
+.. index::
+ single: Adaptation; Date/Time objects
+ single: Date objects; Adaptation
+ single: Time objects; Adaptation
+ single: Interval objects; Adaptation
+
+.. _adapt-date:
+
+Date/Time objects adaptation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Python builtin `~datetime.datetime`, `~datetime.date`,
+`~datetime.time`, `~datetime.timedelta` are converted into PostgreSQL's
+:sql:`timestamp[tz]`, :sql:`date`, :sql:`time[tz]`, :sql:`interval` data types.
+Time zones are supported too.
+
+ >>> dt = datetime.datetime.now()
+ >>> dt
+ datetime.datetime(2010, 2, 8, 1, 40, 27, 425337)
+
+ >>> cur.mogrify("SELECT %s, %s, %s;", (dt, dt.date(), dt.time()))
+ "SELECT '2010-02-08T01:40:27.425337', '2010-02-08', '01:40:27.425337';"
+
+ >>> cur.mogrify("SELECT %s;", (dt - datetime.datetime(2010,1,1),))
+ "SELECT '38 days 6027.425337 seconds';"
+
+.. seealso:: `PostgreSQL date/time types
+ <https://www.postgresql.org/docs/current/static/datatype-datetime.html>`__
+
+
+.. index::
+ single: Time Zones
+
+.. _tz-handling:
+
+Time zones handling
+'''''''''''''''''''
+
+The PostgreSQL type :sql:`timestamp with time zone` (a.k.a.
+:sql:`timestamptz`) is converted into Python `~datetime.datetime` objects.
+
+ >>> cur.execute("SET TIME ZONE 'Europe/Rome'") # UTC + 1 hour
+ >>> cur.execute("SELECT '2010-01-01 10:30:45'::timestamptz")
+ >>> cur.fetchone()[0]
+ datetime.datetime(2010, 1, 1, 10, 30, 45,
+ tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)))
+
+.. note::
+
+ Before Python 3.7, the `datetime` module only supported timezones with an
+ integer number of minutes. A few historical time zones had seconds in the
+ UTC offset: these time zones will have the offset rounded to the nearest
+ minute, with an error of up to 30 seconds, on Python versions before 3.7.
+
+ >>> cur.execute("SET TIME ZONE 'Asia/Calcutta'") # offset was +5:21:10
+ >>> cur.execute("SELECT '1900-01-01 10:30:45'::timestamptz")
+ >>> cur.fetchone()[0].tzinfo
+ # On Python 3.6: 5h, 21m
+ datetime.timezone(datetime.timedelta(0, 19260))
+ # On Python 3.7 and following: 5h, 21m, 10s
+ datetime.timezone(datetime.timedelta(seconds=19270))
+
+.. versionchanged:: 2.2.2
+ timezones with seconds are supported (with rounding). Previously such
+ timezones raised an error.
+
+.. versionchanged:: 2.9
+ timezones with seconds are supported without rounding.
+
+.. versionchanged:: 2.9
+ use `datetime.timezone` as default tzinfo object instead of
+ `~psycopg2.tz.FixedOffsetTimezone`.
+
+.. index::
+ double: Date objects; Infinite
+
+.. _infinite-dates-handling:
+
+Infinite dates handling
+'''''''''''''''''''''''
+
+PostgreSQL can store the representation of an "infinite" date, timestamp, or
+interval. Infinite dates are not available to Python, so these objects are
+mapped to `!date.max`, `!datetime.max`, `!interval.max`. Unfortunately the
+mapping cannot be bidirectional so these dates will be stored back into the
+database with their values, such as :sql:`9999-12-31`.
+
+It is possible to create an alternative adapter for dates and other objects
+to map `date.max` to :sql:`infinity`, for instance::
+
+ class InfDateAdapter:
+ def __init__(self, wrapped):
+ self.wrapped = wrapped
+ def getquoted(self):
+ if self.wrapped == datetime.date.max:
+ return b"'infinity'::date"
+ elif self.wrapped == datetime.date.min:
+ return b"'-infinity'::date"
+ else:
+ return psycopg2.extensions.DateFromPy(self.wrapped).getquoted()
+
+ psycopg2.extensions.register_adapter(datetime.date, InfDateAdapter)
+
+Of course it will not be possible to write the value of `date.max` in the
+database anymore: :sql:`infinity` will be stored instead.
+
+
+.. _time-handling:
+
+Time handling
+'''''''''''''
+
+The PostgreSQL :sql:`time` and Python `~datetime.time` types are not
+fully bidirectional.
+
+Within PostgreSQL, the :sql:`time` type's maximum value of ``24:00:00`` is
+treated as 24-hours later than the minimum value of ``00:00:00``.
+
+ >>> cur.execute("SELECT '24:00:00'::time - '00:00:00'::time")
+ >>> cur.fetchone()[0]
+ datetime.timedelta(days=1)
+
+However, Python's `!time` only supports times until ``23:59:59``.
+Retrieving a value of ``24:00:00`` results in a `!time` of ``00:00:00``.
+
+ >>> cur.execute("SELECT '24:00:00'::time, '00:00:00'::time")
+ >>> cur.fetchone()
+ (datetime.time(0, 0), datetime.time(0, 0))
+
+
+.. _adapt-list:
+
+Lists adaptation
+^^^^^^^^^^^^^^^^
+
+.. index::
+ single: Array; Adaptation
+ double: Lists; Adaptation
+
+Python lists are converted into PostgreSQL :sql:`ARRAY`\ s::
+
+ >>> cur.mogrify("SELECT %s;", ([10, 20, 30], ))
+ 'SELECT ARRAY[10,20,30];'
+
+.. note::
+
+ You can use a Python list as the argument of the :sql:`IN` operator using
+ `the PostgreSQL ANY operator`__. ::
+
+ ids = [10, 20, 30]
+ cur.execute("SELECT * FROM data WHERE id = ANY(%s);", (ids,))
+
+ Furthermore :sql:`ANY` can also work with empty lists, whereas :sql:`IN ()`
+ is a SQL syntax error.
+
+ .. __: https://www.postgresql.org/docs/current/static/functions-subquery.html#FUNCTIONS-SUBQUERY-ANY-SOME
+
+.. note::
+
+ Reading back from PostgreSQL, arrays are converted to lists of Python
+ objects as expected, but only if the items are of a known type.
+ Arrays of unknown types are returned as represented by the database (e.g.
+ ``{a,b,c}``). If you want to convert the items into Python objects you can
+ easily create a typecaster for :ref:`array of unknown types
+ <cast-array-unknown>`.
+
+
+.. _adapt-tuple:
+
+Tuples adaptation
+^^^^^^^^^^^^^^^^^^
+
+.. index::
+ double: Tuple; Adaptation
+ single: IN operator
+
+Python tuples are converted into a syntax suitable for the SQL :sql:`IN`
+operator and to represent a composite type::
+
+ >>> cur.mogrify("SELECT %s IN %s;", (10, (10, 20, 30)))
+ 'SELECT 10 IN (10, 20, 30);'
+
+.. note::
+
+ SQL doesn't allow an empty list in the :sql:`IN` operator, so your code
+ should guard against empty tuples. Alternatively you can :ref:`use a
+ Python list <adapt-list>`.
+
+If you want PostgreSQL composite types to be converted into a Python
+tuple/namedtuple you can use the `~psycopg2.extras.register_composite()`
+function.
+
+.. versionadded:: 2.0.6
+ the tuple :sql:`IN` adaptation.
+
+.. versionchanged:: 2.0.14
+ the tuple :sql:`IN` adapter is always active. In previous releases it
+ was necessary to import the `~psycopg2.extensions` module to have it
+ registered.
+
+.. versionchanged:: 2.3
+ `~collections.namedtuple` instances are adapted like regular tuples and
+ can thus be used to represent composite types.
+
+
+.. index:: Transaction, Begin, Commit, Rollback, Autocommit, Read only
+
+.. _transactions-control:
+
+Transactions control
+--------------------
+
+In Psycopg transactions are handled by the `connection` class. By
+default, the first time a command is sent to the database (using one of the
+`cursor`\ s created by the connection), a new transaction is created.
+The following database commands will be executed in the context of the same
+transaction -- not only the commands issued by the first cursor, but the ones
+issued by all the cursors created by the same connection. Should any command
+fail, the transaction will be aborted and no further command will be executed
+until a call to the `~connection.rollback()` method.
+
+The connection is responsible for terminating its transaction, calling either
+the `~connection.commit()` or `~connection.rollback()` method. Committed
+changes are immediately made persistent in the database. If the connection
+is closed (using the `~connection.close()` method) or destroyed (using `!del`
+or by letting it fall out of scope) while a transaction is in progress, the
+server will discard the transaction. However doing so is not advisable:
+middleware such as PgBouncer_ may see the connection closed uncleanly and
+dispose of it.
+
+.. _PgBouncer: http://www.pgbouncer.org/
+
+It is possible to set the connection in *autocommit* mode: this way all the
+commands executed will be immediately committed and no rollback is possible. A
+few commands (e.g. :sql:`CREATE DATABASE`, :sql:`VACUUM`, :sql:`CALL` on
+`stored procedures`__ using transaction control...) must be run
+outside any transaction: in order to be able to run these commands from
+Psycopg, the connection must be in autocommit mode: you can use the
+`~connection.autocommit` property.
+
+.. __: https://www.postgresql.org/docs/current/xproc.html
+
+.. warning::
+
+ By default even a simple :sql:`SELECT` will start a transaction: in
+ long-running programs, if no further action is taken, the session will
+ remain "idle in transaction", an undesirable condition for several
+ reasons (locks are held by the session, tables bloat...). For long lived
+ scripts, either make sure to terminate a transaction as soon as possible or
+ use an autocommit connection.
+
+A few other transaction properties can be set session-wide by the
+`!connection`: for instance it is possible to have read-only transactions or
+change the isolation level. See the `~connection.set_session()` method for all
+the details.
+
+
+.. index::
+ single: with statement
+
+``with`` statement
+^^^^^^^^^^^^^^^^^^
+
+Starting from version 2.5, psycopg2's connections and cursors are *context
+managers* and can be used with the ``with`` statement::
+
+ with psycopg2.connect(DSN) as conn:
+ with conn.cursor() as curs:
+ curs.execute(SQL)
+
+When a connection exits the ``with`` block, if no exception has been raised by
+the block, the transaction is committed. In case of exception the transaction
+is rolled back.
+
+When a cursor exits the ``with`` block it is closed, releasing any resource
+eventually associated with it. The state of the transaction is not affected.
+
+A connection can be used in more than one ``with`` statement
+and each ``with`` block is effectively wrapped in a separate transaction::
+
+ conn = psycopg2.connect(DSN)
+
+ with conn:
+ with conn.cursor() as curs:
+ curs.execute(SQL1)
+
+ with conn:
+ with conn.cursor() as curs:
+ curs.execute(SQL2)
+
+ conn.close()
+
+.. warning::
+
+ Unlike file objects or other resources, exiting the connection's
+ ``with`` block **doesn't close the connection**, but only the transaction
+ associated to it. If you want to make sure the connection is closed after
+    a certain point, you should still use a try/finally block::
+
+ conn = psycopg2.connect(DSN)
+ try:
+ # connection usage
+ finally:
+ conn.close()
+
+.. versionchanged:: 2.9
+ ``with connection`` starts a transaction also on autocommit connections.
+
+
+.. index::
+ pair: Server side; Cursor
+ pair: Named; Cursor
+ pair: DECLARE; SQL command
+ pair: FETCH; SQL command
+ pair: MOVE; SQL command
+
+.. _server-side-cursors:
+
+Server side cursors
+-------------------
+
+When a database query is executed, the Psycopg `cursor` usually fetches
+all the records returned by the backend, transferring them to the client
+process. If the query returned a huge amount of data, a proportionally large
+amount of memory will be allocated by the client.
+
+If the dataset is too large to be practically handled on the client side, it is
+possible to create a *server side* cursor. Using this kind of cursor it is
+possible to transfer to the client only a controlled amount of data, so that a
+large dataset can be examined without keeping it entirely in memory.
+
+Server side cursor are created in PostgreSQL using the |DECLARE|_ command and
+subsequently handled using :sql:`MOVE`, :sql:`FETCH` and :sql:`CLOSE` commands.
+
+Psycopg wraps the database server side cursor in *named cursors*. A named
+cursor is created using the `~connection.cursor()` method specifying the
+*name* parameter. Such cursor will behave mostly like a regular cursor,
+allowing the user to move in the dataset using the `~cursor.scroll()`
+method and to read the data using `~cursor.fetchone()` and
+`~cursor.fetchmany()` methods. Normally you can only scroll forward in a
+cursor: if you need to scroll backwards you should declare your cursor
+`~cursor.scrollable`.
+
+Named cursors are also :ref:`iterable <cursor-iterable>` like regular cursors.
+Note however that before Psycopg 2.4 iteration was performed fetching one
+record at a time from the backend, resulting in a large overhead. The attribute
+`~cursor.itersize` now controls how many records are fetched at a time
+during the iteration: the default value of 2000 allows fetching about 100KB
+per roundtrip assuming records of 10-20 columns of mixed number and strings;
+you may decrease this value if you are dealing with huge records.
+
+Named cursors are usually created :sql:`WITHOUT HOLD`, meaning they live only
+as long as the current transaction. Trying to fetch from a named cursor after
+a `~connection.commit()` or to create a named cursor when the connection
+is in `~connection.autocommit` mode will result in an exception.
+It is possible to create a :sql:`WITH HOLD` cursor by specifying a `!True`
+value for the `withhold` parameter to `~connection.cursor()` or by setting the
+`~cursor.withhold` attribute to `!True` before calling `~cursor.execute()` on
+the cursor. It is extremely important to always `~cursor.close()` such cursors,
+otherwise they will continue to hold server-side resources until the connection
+will be eventually closed. Also note that while :sql:`WITH HOLD` cursors
+lifetime extends well after `~connection.commit()`, calling
+`~connection.rollback()` will automatically close the cursor.
+
+.. note::
+
+ It is also possible to use a named cursor to consume a cursor created
+ in some other way than using the |DECLARE| executed by
+ `~cursor.execute()`. For example, you may have a PL/pgSQL function
+ returning a cursor:
+
+ .. code-block:: postgres
+
+ CREATE FUNCTION reffunc(refcursor) RETURNS refcursor AS $$
+ BEGIN
+ OPEN $1 FOR SELECT col FROM test;
+ RETURN $1;
+ END;
+ $$ LANGUAGE plpgsql;
+
+ You can read the cursor content by calling the function with a regular,
+ non-named, Psycopg cursor:
+
+ .. code-block:: python
+
+ cur1 = conn.cursor()
+ cur1.callproc('reffunc', ['curname'])
+
+ and then use a named cursor in the same transaction to "steal the cursor":
+
+ .. code-block:: python
+
+ cur2 = conn.cursor('curname')
+ for record in cur2: # or cur2.fetchone, fetchmany...
+ # do something with record
+ pass
+
+
+.. |DECLARE| replace:: :sql:`DECLARE`
+.. _DECLARE: https://www.postgresql.org/docs/current/static/sql-declare.html
+
+
+
+.. index:: Thread safety, Multithread, Multiprocess
+
+.. _thread-safety:
+
+Thread and process safety
+-------------------------
+
+The Psycopg module and the `connection` objects are *thread-safe*: many
+threads can access the same database either using separate sessions and
+creating a `!connection` per thread or using the same
+connection and creating separate `cursor`\ s. In |DBAPI|_ parlance, Psycopg is
+*level 2 thread safe*.
+
+The difference between the above two approaches is that, using different
+connections, the commands will be executed in different sessions and will be
+served by different server processes. On the other hand, using many cursors on
+the same connection, all the commands will be executed in the same session
+(and in the same transaction if the connection is not in :ref:`autocommit
+<transactions-control>` mode), but they will be serialized.
+
+The above observations are only valid for regular threads: they don't apply to
+forked processes nor to green threads. `libpq` connections `shouldn't be used by
+forked processes`__, so when using a module such as `multiprocessing` or a
+forking web deploy method such as FastCGI make sure to create the connections
+*after* the fork.
+
+.. __: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNECT
+
+Connections shouldn't be shared either by different green threads: see
+:ref:`green-support` for further details.
+
+
+
+.. index::
+ pair: COPY; SQL command
+
+.. _copy:
+
+Using COPY TO and COPY FROM
+---------------------------
+
+Psycopg `cursor` objects provide an interface to the efficient
+PostgreSQL |COPY|__ command to move data from files to tables and back.
+
+Currently no adaptation is provided between Python and PostgreSQL types on
+|COPY|: the file can be any Python file-like object but its format must be in
+the format accepted by `PostgreSQL COPY command`__ (data format, escaped
+characters, etc).
+
+.. __: COPY_
+
+The methods exposed are:
+
+`~cursor.copy_from()`
+ Reads data *from* a file-like object appending them to a database table
+ (:sql:`COPY table FROM file` syntax). The source file must provide both
+ `!read()` and `!readline()` methods.
+
+`~cursor.copy_to()`
+ Writes the content of a table *to* a file-like object (:sql:`COPY table TO
+ file` syntax). The target file must have a `write()` method.
+
+`~cursor.copy_expert()`
+ Allows to handle more specific cases and to use all the :sql:`COPY`
+ features available in PostgreSQL.
+
+Please refer to the documentation of the single methods for details and
+examples.
+
+.. |COPY| replace:: :sql:`COPY`
+.. __: https://www.postgresql.org/docs/current/static/sql-copy.html
+
+
+
+.. index::
+ single: Large objects
+
+.. _large-objects:
+
+Access to PostgreSQL large objects
+----------------------------------
+
+PostgreSQL offers support for `large objects`__, which provide stream-style
+access to user data that is stored in a special large-object structure. They
+are useful with data values too large to be manipulated conveniently as a
+whole.
+
+.. __: https://www.postgresql.org/docs/current/static/largeobjects.html
+
+Psycopg allows access to the large object using the
+`~psycopg2.extensions.lobject` class. Objects are generated using the
+`connection.lobject()` factory method. Data can be retrieved either as bytes
+or as Unicode strings.
+
+Psycopg's large object support provides efficient import/export with file system files
+using the |lo_import|_ and |lo_export|_ libpq functions.
+
+.. |lo_import| replace:: `!lo_import()`
+.. _lo_import: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-IMPORT
+.. |lo_export| replace:: `!lo_export()`
+.. _lo_export: https://www.postgresql.org/docs/current/static/lo-interfaces.html#LO-EXPORT
+
+.. versionchanged:: 2.6
+ added support for large objects greater than 2GB. Note that the support is
+ enabled only if all the following conditions are verified:
+
+ - the Python build is 64 bits;
+ - the extension was built against at least libpq 9.3;
+ - the server version is at least PostgreSQL 9.3
+ (`~connection.server_version` must be >= ``90300``).
+
+ If Psycopg was built with 64 bits large objects support (i.e. the first
+ two conditions above are verified), the `psycopg2.__version__` constant
+ will contain the ``lo64`` flag. If any of the conditions is not met
+ several `!lobject` methods will fail if the arguments exceed 2GB.
+
+
+
+.. index::
+ pair: Two-phase commit; Transaction
+
+.. _tpc:
+
+Two-Phase Commit protocol support
+---------------------------------
+
+.. versionadded:: 2.3
+
+Psycopg exposes the two-phase commit features available since PostgreSQL 8.1
+implementing the *two-phase commit extensions* proposed by the |DBAPI|.
+
+The |DBAPI| model of two-phase commit is inspired by the `XA specification`__,
+according to which transaction IDs are formed from three components:
+
+- a format ID (non-negative 32 bit integer)
+- a global transaction ID (string not longer than 64 bytes)
+- a branch qualifier (string not longer than 64 bytes)
+
+For a particular global transaction, the first two components will be the same
+for all the resources. Every resource will be assigned a different branch
+qualifier.
+
+According to the |DBAPI| specification, a transaction ID is created using the
+`connection.xid()` method. Once you have a transaction id, a distributed
+transaction can be started with `connection.tpc_begin()`, prepared using
+`~connection.tpc_prepare()` and completed using `~connection.tpc_commit()` or
+`~connection.tpc_rollback()`. Transaction IDs can also be retrieved from the
+database using `~connection.tpc_recover()` and completed using the above
+`!tpc_commit()` and `!tpc_rollback()`.
+
+PostgreSQL doesn't follow the XA standard though, and the ID for a PostgreSQL
+prepared transaction can be any string up to 200 characters long.
+Psycopg's `~psycopg2.extensions.Xid` objects can represent both XA-style
+transaction IDs (such as the ones created by the `!xid()` method) and
+PostgreSQL transaction IDs identified by an unparsed string.
+
+The format in which the Xids are converted into strings passed to the
+database is the same employed by the `PostgreSQL JDBC driver`__: this should
+allow interoperation between tools written in Python and in Java. For example
+a recovery tool written in Python would be able to recognize the components of
+transactions produced by a Java program.
+
+For further details see the documentation for the above methods.
+
+.. __: https://publications.opengroup.org/c193
+.. __: https://jdbc.postgresql.org/
diff --git a/lib/__init__.py b/lib/__init__.py
new file mode 100644
index 0000000..59a8938
--- /dev/null
+++ b/lib/__init__.py
@@ -0,0 +1,126 @@
+"""A Python driver for PostgreSQL
+
+psycopg is a PostgreSQL_ database adapter for the Python_ programming
+language. This is version 2, a complete rewrite of the original code to
+provide new-style classes for connection and cursor objects and other sweet
+candies. Like the original, psycopg 2 was written with the aim of being very
+small and fast, and stable as a rock.
+
+Homepage: https://psycopg.org/
+
+.. _PostgreSQL: https://www.postgresql.org/
+.. _Python: https://www.python.org/
+
+:Groups:
+ * `Connections creation`: connect
+ * `Value objects constructors`: Binary, Date, DateFromTicks, Time,
+ TimeFromTicks, Timestamp, TimestampFromTicks
+"""
+# psycopg/__init__.py - initialization of the psycopg module
+#
+# Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+# Copyright (C) 2020-2021 The Psycopg Team
+#
+# psycopg2 is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link this program with the OpenSSL library (or with
+# modified versions of OpenSSL that use the same license as OpenSSL),
+# and distribute linked combinations including the two.
+#
+# You must obey the GNU Lesser General Public License in all respects for
+# all of the code used other than OpenSSL.
+#
+# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# Import modules needed by _psycopg to allow tools like py2exe to do
+# their work without bothering about the module dependencies.
+
+# Note: the first internal import should be _psycopg, otherwise the real cause
+# of a failed loading of the C module may get hidden, see
+# https://archives.postgresql.org/psycopg/2011-02/msg00044.php
+
+# Import the DBAPI-2.0 stuff into top-level module.
+
+from psycopg2._psycopg import ( # noqa
+ BINARY, NUMBER, STRING, DATETIME, ROWID,
+
+ Binary, Date, Time, Timestamp,
+ DateFromTicks, TimeFromTicks, TimestampFromTicks,
+
+ Error, Warning, DataError, DatabaseError, ProgrammingError, IntegrityError,
+ InterfaceError, InternalError, NotSupportedError, OperationalError,
+
+ _connect, apilevel, threadsafety, paramstyle,
+ __version__, __libpq_version__,
+)
+
+
+# Register default adapters.
+
+from psycopg2 import extensions as _ext
+_ext.register_adapter(tuple, _ext.SQL_IN)
+_ext.register_adapter(type(None), _ext.NoneAdapter)
+
+# Register the Decimal adapter here instead of in the C layer.
+# This way a new class is registered for each sub-interpreter.
+# See ticket #52
+from decimal import Decimal # noqa
+from psycopg2._psycopg import Decimal as Adapter # noqa
+_ext.register_adapter(Decimal, Adapter)
+del Decimal, Adapter
+
+
+def connect(dsn=None, connection_factory=None, cursor_factory=None, **kwargs):
+ """
+ Create a new database connection.
+
+ The connection parameters can be specified as a string:
+
+ conn = psycopg2.connect("dbname=test user=postgres password=secret")
+
+ or using a set of keyword arguments:
+
+ conn = psycopg2.connect(database="test", user="postgres", password="secret")
+
+ Or as a mix of both. The basic connection parameters are:
+
+ - *dbname*: the database name
+ - *database*: the database name (only as keyword argument)
+ - *user*: user name used to authenticate
+ - *password*: password used to authenticate
+ - *host*: database host address (defaults to UNIX socket if not provided)
+ - *port*: connection port number (defaults to 5432 if not provided)
+
+ Using the *connection_factory* parameter a different class or connections
+ factory can be specified. It should be a callable object taking a dsn
+ argument.
+
+ Using the *cursor_factory* parameter, a new default cursor factory will be
+ used by cursor().
+
+ Using *async*=True an asynchronous connection will be created. *async_* is
+ a valid alias (for Python versions where ``async`` is a keyword).
+
+ Any other keyword parameter will be passed to the underlying client
+ library: the list of supported parameters depends on the library version.
+
+ """
+ kwasync = {}
+ if 'async' in kwargs:
+ kwasync['async'] = kwargs.pop('async')
+ if 'async_' in kwargs:
+ kwasync['async_'] = kwargs.pop('async_')
+
+ dsn = _ext.make_dsn(dsn, **kwargs)
+ conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
+ if cursor_factory is not None:
+ conn.cursor_factory = cursor_factory
+
+ return conn
diff --git a/lib/_ipaddress.py b/lib/_ipaddress.py
new file mode 100644
index 0000000..d38566c
--- /dev/null
+++ b/lib/_ipaddress.py
@@ -0,0 +1,90 @@
+"""Implementation of the ipaddres-based network types adaptation
+"""
+
+# psycopg/_ipaddress.py - Ipaddress-based network types adaptation
+#
+# Copyright (C) 2016-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
+# Copyright (C) 2020-2021 The Psycopg Team
+#
+# psycopg2 is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link this program with the OpenSSL library (or with
+# modified versions of OpenSSL that use the same license as OpenSSL),
+# and distribute linked combinations including the two.
+#
+# You must obey the GNU Lesser General Public License in all respects for
+# all of the code used other than OpenSSL.
+#
+# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+from psycopg2.extensions import (
+ new_type, new_array_type, register_type, register_adapter, QuotedString)
+
+# The module is imported on register_ipaddress
+ipaddress = None
+
+# The typecasters are created only once
+_casters = None
+
+
+def register_ipaddress(conn_or_curs=None):
+ """
+ Register conversion support between `ipaddress` objects and `network types`__.
+
+ :param conn_or_curs: the scope where to register the type casters.
+ If `!None` register them globally.
+
+ After the function is called, PostgreSQL :sql:`inet` values will be
+ converted into `~ipaddress.IPv4Interface` or `~ipaddress.IPv6Interface`
+ objects, :sql:`cidr` values into `~ipaddress.IPv4Network` or
+ `~ipaddress.IPv6Network`.
+
+ .. __: https://www.postgresql.org/docs/current/static/datatype-net-types.html
+ """
+ global ipaddress
+ import ipaddress
+
+ global _casters
+ if _casters is None:
+ _casters = _make_casters()
+
+ for c in _casters:
+ register_type(c, conn_or_curs)
+
+ for t in [ipaddress.IPv4Interface, ipaddress.IPv6Interface,
+ ipaddress.IPv4Network, ipaddress.IPv6Network]:
+ register_adapter(t, adapt_ipaddress)
+
+
+def _make_casters():
+ inet = new_type((869,), 'INET', cast_interface)
+ ainet = new_array_type((1041,), 'INET[]', inet)
+
+ cidr = new_type((650,), 'CIDR', cast_network)
+ acidr = new_array_type((651,), 'CIDR[]', cidr)
+
+ return [inet, ainet, cidr, acidr]
+
+
+def cast_interface(s, cur=None):
+ if s is None:
+ return None
+ # The Py2 version forced the use of unicode; str() keeps the behaviour uniform.
+ return ipaddress.ip_interface(str(s))
+
+
+def cast_network(s, cur=None):
+ if s is None:
+ return None
+ return ipaddress.ip_network(str(s))
+
+
+def adapt_ipaddress(obj):
+ return QuotedString(str(obj))
diff --git a/lib/_json.py b/lib/_json.py
new file mode 100644
index 0000000..9502422
--- /dev/null
+++ b/lib/_json.py
@@ -0,0 +1,199 @@
+"""Implementation of the JSON adaptation objects
+
+This module exists to avoid a circular import problem: psycopg2.extras depends
+on psycopg2.extensions, so I can't create the default JSON typecasters in
+extensions importing register_json from extras.
+"""
+
+# psycopg/_json.py - Implementation of the JSON adaptation objects
+#
+# Copyright (C) 2012-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
+# Copyright (C) 2020-2021 The Psycopg Team
+#
+# psycopg2 is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link this program with the OpenSSL library (or with
+# modified versions of OpenSSL that use the same license as OpenSSL),
+# and distribute linked combinations including the two.
+#
+# You must obey the GNU Lesser General Public License in all respects for
+# all of the code used other than OpenSSL.
+#
+# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+import json
+
+from psycopg2._psycopg import ISQLQuote, QuotedString
+from psycopg2._psycopg import new_type, new_array_type, register_type
+
+
+# oids from PostgreSQL 9.2
+JSON_OID = 114
+JSONARRAY_OID = 199
+
+# oids from PostgreSQL 9.4
+JSONB_OID = 3802
+JSONBARRAY_OID = 3807
+
+
+class Json:
+ """
+ An `~psycopg2.extensions.ISQLQuote` wrapper to adapt a Python object to
+ :sql:`json` data type.
+
+ `!Json` can be used to wrap any object supported by the provided *dumps*
+ function. If none is provided, the standard :py:func:`json.dumps()` is
+ used.
+
+ """
+ def __init__(self, adapted, dumps=None):
+ self.adapted = adapted
+ self._conn = None
+ self._dumps = dumps or json.dumps
+
+ def __conform__(self, proto):
+ if proto is ISQLQuote:
+ return self
+
+ def dumps(self, obj):
+ """Serialize *obj* in JSON format.
+
+ The default is to call `!json.dumps()` or the *dumps* function
+ provided in the constructor. You can override this method to create a
+ customized JSON wrapper.
+ """
+ return self._dumps(obj)
+
+ def prepare(self, conn):
+ self._conn = conn
+
+ def getquoted(self):
+ s = self.dumps(self.adapted)
+ qs = QuotedString(s)
+ if self._conn is not None:
+ qs.prepare(self._conn)
+ return qs.getquoted()
+
+ def __str__(self):
+ # getquoted is binary
+ return self.getquoted().decode('ascii', 'replace')
+
+
+def register_json(conn_or_curs=None, globally=False, loads=None,
+ oid=None, array_oid=None, name='json'):
+ """Create and register typecasters converting :sql:`json` type to Python objects.
+
+ :param conn_or_curs: a connection or cursor used to find the :sql:`json`
+ and :sql:`json[]` oids; the typecasters are registered in a scope
+ limited to this object, unless *globally* is set to `!True`. It can be
+ `!None` if the oids are provided
+ :param globally: if `!False` register the typecasters only on
+ *conn_or_curs*, otherwise register them globally
+ :param loads: the function used to parse the data into a Python object. If
+ `!None` use `!json.loads()`, where `!json` is the module chosen
+ according to the Python version (see above)
+ :param oid: the OID of the :sql:`json` type if known; If not, it will be
+ queried on *conn_or_curs*
+ :param array_oid: the OID of the :sql:`json[]` array type if known;
+ if not, it will be queried on *conn_or_curs*
+ :param name: the name of the data type to look for in *conn_or_curs*
+
+ The connection or cursor passed to the function will be used to query the
+ database and look for the OID of the :sql:`json` type (or an alternative
+type if *name* is provided). No query is performed if *oid* and *array_oid*
+ are provided. Raise `~psycopg2.ProgrammingError` if the type is not found.
+
+ """
+ if oid is None:
+ oid, array_oid = _get_json_oids(conn_or_curs, name)
+
+ JSON, JSONARRAY = _create_json_typecasters(
+ oid, array_oid, loads=loads, name=name.upper())
+
+ register_type(JSON, not globally and conn_or_curs or None)
+
+ if JSONARRAY is not None:
+ register_type(JSONARRAY, not globally and conn_or_curs or None)
+
+ return JSON, JSONARRAY
+
+
+def register_default_json(conn_or_curs=None, globally=False, loads=None):
+ """
+ Create and register :sql:`json` typecasters for PostgreSQL 9.2 and following.
+
+ Since PostgreSQL 9.2 :sql:`json` is a builtin type, hence its oid is known
+ and fixed. This function allows specifying a customized *loads* function
+ for the default :sql:`json` type without querying the database.
+ All the parameters have the same meaning of `register_json()`.
+ """
+ return register_json(conn_or_curs=conn_or_curs, globally=globally,
+ loads=loads, oid=JSON_OID, array_oid=JSONARRAY_OID)
+
+
+def register_default_jsonb(conn_or_curs=None, globally=False, loads=None):
+ """
+ Create and register :sql:`jsonb` typecasters for PostgreSQL 9.4 and following.
+
+ As in `register_default_json()`, the function allows to register a
+ customized *loads* function for the :sql:`jsonb` type at its known oid for
+ PostgreSQL 9.4 and following versions. All the parameters have the same
+ meaning of `register_json()`.
+ """
+ return register_json(conn_or_curs=conn_or_curs, globally=globally,
+ loads=loads, oid=JSONB_OID, array_oid=JSONBARRAY_OID, name='jsonb')
+
+
+def _create_json_typecasters(oid, array_oid, loads=None, name='JSON'):
+ """Create typecasters for json data type."""
+ if loads is None:
+ loads = json.loads
+
+ def typecast_json(s, cur):
+ if s is None:
+ return None
+ return loads(s)
+
+ JSON = new_type((oid, ), name, typecast_json)
+ if array_oid is not None:
+ JSONARRAY = new_array_type((array_oid, ), f"{name}ARRAY", JSON)
+ else:
+ JSONARRAY = None
+
+ return JSON, JSONARRAY
+
+
+def _get_json_oids(conn_or_curs, name='json'):
+ # lazy imports
+ from psycopg2.extensions import STATUS_IN_TRANSACTION
+ from psycopg2.extras import _solve_conn_curs
+
+ conn, curs = _solve_conn_curs(conn_or_curs)
+
+ # Store the transaction status of the connection to revert it after use
+ conn_status = conn.status
+
+ # column typarray not available before PG 8.3
+ typarray = conn.info.server_version >= 80300 and "typarray" or "NULL"
+
+ # get the oid for the hstore
+ curs.execute(
+ "SELECT t.oid, %s FROM pg_type t WHERE t.typname = %%s;"
+ % typarray, (name,))
+ r = curs.fetchone()
+
+ # revert the status of the connection as before the command
+ if conn_status != STATUS_IN_TRANSACTION and not conn.autocommit:
+ conn.rollback()
+
+ if not r:
+ raise conn.ProgrammingError(f"{name} data type not found")
+
+ return r
diff --git a/lib/_range.py b/lib/_range.py
new file mode 100644
index 0000000..19a05d3
--- /dev/null
+++ b/lib/_range.py
@@ -0,0 +1,537 @@
+"""Implementation of the Range type and adaptation
+
+"""
+
+# psycopg/_range.py - Implementation of the Range type and adaptation
+#
+# Copyright (C) 2012-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
+# Copyright (C) 2020-2021 The Psycopg Team
+#
+# psycopg2 is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link this program with the OpenSSL library (or with
+# modified versions of OpenSSL that use the same license as OpenSSL),
+# and distribute linked combinations including the two.
+#
+# You must obey the GNU Lesser General Public License in all respects for
+# all of the code used other than OpenSSL.
+#
+# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+import re
+
+from psycopg2._psycopg import ProgrammingError, InterfaceError
+from psycopg2.extensions import ISQLQuote, adapt, register_adapter
+from psycopg2.extensions import new_type, new_array_type, register_type
+
+
+class Range:
+ """Python representation for a PostgreSQL |range|_ type.
+
+ :param lower: lower bound for the range. `!None` means unbound
+ :param upper: upper bound for the range. `!None` means unbound
+ :param bounds: one of the literal strings ``()``, ``[)``, ``(]``, ``[]``,
+ representing whether the lower or upper bounds are included
+ :param empty: if `!True`, the range is empty
+
+ """
+ __slots__ = ('_lower', '_upper', '_bounds')
+
+ def __init__(self, lower=None, upper=None, bounds='[)', empty=False):
+ if not empty:
+ if bounds not in ('[)', '(]', '()', '[]'):
+ raise ValueError(f"bound flags not valid: {bounds!r}")
+
+ self._lower = lower
+ self._upper = upper
+ self._bounds = bounds
+ else:
+ self._lower = self._upper = self._bounds = None
+
+ def __repr__(self):
+ if self._bounds is None:
+ return f"{self.__class__.__name__}(empty=True)"
+ else:
+ return "{}({!r}, {!r}, {!r})".format(self.__class__.__name__,
+ self._lower, self._upper, self._bounds)
+
+ def __str__(self):
+ if self._bounds is None:
+ return 'empty'
+
+ items = [
+ self._bounds[0],
+ str(self._lower),
+ ', ',
+ str(self._upper),
+ self._bounds[1]
+ ]
+ return ''.join(items)
+
+ @property
+ def lower(self):
+ """The lower bound of the range. `!None` if empty or unbound."""
+ return self._lower
+
+ @property
+ def upper(self):
+ """The upper bound of the range. `!None` if empty or unbound."""
+ return self._upper
+
+ @property
+ def isempty(self):
+ """`!True` if the range is empty."""
+ return self._bounds is None
+
+ @property
+ def lower_inf(self):
+ """`!True` if the range doesn't have a lower bound."""
+ if self._bounds is None:
+ return False
+ return self._lower is None
+
+ @property
+ def upper_inf(self):
+ """`!True` if the range doesn't have an upper bound."""
+ if self._bounds is None:
+ return False
+ return self._upper is None
+
+ @property
+ def lower_inc(self):
+ """`!True` if the lower bound is included in the range."""
+ if self._bounds is None or self._lower is None:
+ return False
+ return self._bounds[0] == '['
+
+ @property
+ def upper_inc(self):
+ """`!True` if the upper bound is included in the range."""
+ if self._bounds is None or self._upper is None:
+ return False
+ return self._bounds[1] == ']'
+
+ def __contains__(self, x):
+ if self._bounds is None:
+ return False
+
+ if self._lower is not None:
+ if self._bounds[0] == '[':
+ if x < self._lower:
+ return False
+ else:
+ if x <= self._lower:
+ return False
+
+ if self._upper is not None:
+ if self._bounds[1] == ']':
+ if x > self._upper:
+ return False
+ else:
+ if x >= self._upper:
+ return False
+
+ return True
+
+ def __bool__(self):
+ return self._bounds is not None
+
+ def __nonzero__(self):
+ # Python 2 compatibility
+ return type(self).__bool__(self)
+
+ def __eq__(self, other):
+ if not isinstance(other, Range):
+ return False
+ return (self._lower == other._lower
+ and self._upper == other._upper
+ and self._bounds == other._bounds)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return hash((self._lower, self._upper, self._bounds))
+
+ # as the postgres docs describe for the server-side stuff,
+ # ordering is rather arbitrary, but will remain stable
+ # and consistent.
+
+ def __lt__(self, other):
+ if not isinstance(other, Range):
+ return NotImplemented
+ for attr in ('_lower', '_upper', '_bounds'):
+ self_value = getattr(self, attr)
+ other_value = getattr(other, attr)
+ if self_value == other_value:
+ pass
+ elif self_value is None:
+ return True
+ elif other_value is None:
+ return False
+ else:
+ return self_value < other_value
+ return False
+
+ def __le__(self, other):
+ if self == other:
+ return True
+ else:
+ return self.__lt__(other)
+
+ def __gt__(self, other):
+ if isinstance(other, Range):
+ return other.__lt__(self)
+ else:
+ return NotImplemented
+
+ def __ge__(self, other):
+ if self == other:
+ return True
+ else:
+ return self.__gt__(other)
+
+ def __getstate__(self):
+ return {slot: getattr(self, slot)
+ for slot in self.__slots__ if hasattr(self, slot)}
+
+ def __setstate__(self, state):
+ for slot, value in state.items():
+ setattr(self, slot, value)
+
+
+def register_range(pgrange, pyrange, conn_or_curs, globally=False):
+ """Create and register an adapter and the typecasters to convert between
+ a PostgreSQL |range|_ type and a PostgreSQL `Range` subclass.
+
+ :param pgrange: the name of the PostgreSQL |range| type. Can be
+ schema-qualified
+ :param pyrange: a `Range` strict subclass, or just a name to give to a new
+ class
+ :param conn_or_curs: a connection or cursor used to find the oid of the
+ range and its subtype; the typecaster is registered in a scope limited
+ to this object, unless *globally* is set to `!True`
+ :param globally: if `!False` (default) register the typecaster only on
+ *conn_or_curs*, otherwise register it globally
+ :return: `RangeCaster` instance responsible for the conversion
+
+ If a string is passed to *pyrange*, a new `Range` subclass is created
+ with such name and will be available as the `~RangeCaster.range` attribute
+ of the returned `RangeCaster` object.
+
+ The function queries the database on *conn_or_curs* to inspect the
+ *pgrange* type and raises `~psycopg2.ProgrammingError` if the type is not
+ found. If querying the database is not advisable, use directly the
+ `RangeCaster` class and register the adapter and typecasters using the
+ provided functions.
+
+ """
+ caster = RangeCaster._from_db(pgrange, pyrange, conn_or_curs)
+ caster._register(not globally and conn_or_curs or None)
+ return caster
+
+
+class RangeAdapter:
+ """`ISQLQuote` adapter for `Range` subclasses.
+
+ This is an abstract class: concrete classes must set a `name` class
+ attribute or override `getquoted()`.
+ """
+ name = None
+
+ def __init__(self, adapted):
+ self.adapted = adapted
+
+ def __conform__(self, proto):
+ if self._proto is ISQLQuote:
+ return self
+
+ def prepare(self, conn):
+ self._conn = conn
+
+ def getquoted(self):
+ if self.name is None:
+ raise NotImplementedError(
+ 'RangeAdapter must be subclassed overriding its name '
+ 'or the getquoted() method')
+
+ r = self.adapted
+ if r.isempty:
+ return b"'empty'::" + self.name.encode('utf8')
+
+ if r.lower is not None:
+ a = adapt(r.lower)
+ if hasattr(a, 'prepare'):
+ a.prepare(self._conn)
+ lower = a.getquoted()
+ else:
+ lower = b'NULL'
+
+ if r.upper is not None:
+ a = adapt(r.upper)
+ if hasattr(a, 'prepare'):
+ a.prepare(self._conn)
+ upper = a.getquoted()
+ else:
+ upper = b'NULL'
+
+ return self.name.encode('utf8') + b'(' + lower + b', ' + upper \
+ + b", '" + r._bounds.encode('utf8') + b"')"
+
+
class RangeCaster:
    """Helper class to convert between `Range` and PostgreSQL range types.

    Objects of this class are usually created by `register_range()`. Manual
    creation could be useful if querying the database is not advisable: in
    this case the oids must be provided.
    """
    def __init__(self, pgrange, pyrange, oid, subtype_oid, array_oid=None):
        # oid of the range subtype: parse() uses it to cast the bounds
        self.subtype_oid = subtype_oid
        self._create_ranges(pgrange, pyrange)

        name = self.adapter.name or self.adapter.__class__.__name__

        # typecaster for the scalar range type
        self.typecaster = new_type((oid,), name, self.parse)

        # typecaster for arrays of the range type, if the array oid is known
        if array_oid is not None:
            self.array_typecaster = new_array_type(
                (array_oid,), name + "ARRAY", self.typecaster)
        else:
            self.array_typecaster = None

    def _create_ranges(self, pgrange, pyrange):
        """Create Range and RangeAdapter classes if needed."""
        # if got a string create a new RangeAdapter concrete type (with a name)
        # else take it as an adapter. Passing an adapter should be considered
        # an implementation detail and is not documented. It is currently used
        # for the numeric ranges.
        self.adapter = None
        if isinstance(pgrange, str):
            self.adapter = type(pgrange, (RangeAdapter,), {})
            self.adapter.name = pgrange
        else:
            try:
                if issubclass(pgrange, RangeAdapter) \
                        and pgrange is not RangeAdapter:
                    self.adapter = pgrange
            except TypeError:
                # pgrange was not a class at all: leave self.adapter None
                pass

        if self.adapter is None:
            raise TypeError(
                'pgrange must be a string or a RangeAdapter strict subclass')

        # same dance for the Python-side class: a string creates a new Range
        # subclass, a strict Range subclass is taken as is
        self.range = None
        try:
            if isinstance(pyrange, str):
                self.range = type(pyrange, (Range,), {})
            if issubclass(pyrange, Range) and pyrange is not Range:
                self.range = pyrange
        except TypeError:
            # pyrange was neither a string nor a class
            pass

        if self.range is None:
            raise TypeError(
                'pyrange must be a type or a Range strict subclass')

    @classmethod
    def _from_db(self, name, pyrange, conn_or_curs):
        """Return a `RangeCaster` instance for the type *pgrange*.

        Raise `ProgrammingError` if the type is not found.
        """
        # note: this is a classmethod whose first parameter is spelled
        # "self"; it actually receives the class.
        from psycopg2.extensions import STATUS_IN_TRANSACTION
        from psycopg2.extras import _solve_conn_curs
        conn, curs = _solve_conn_curs(conn_or_curs)

        # range types were introduced in PostgreSQL 9.2
        if conn.info.server_version < 90200:
            raise ProgrammingError("range types not available in version %s"
                                   % conn.info.server_version)

        # Store the transaction status of the connection to revert it after use
        conn_status = conn.status

        # Use the correct schema
        if '.' in name:
            schema, tname = name.split('.', 1)
        else:
            tname = name
            schema = 'public'

        # get the type oid and attributes
        try:
            curs.execute("""\
select rngtypid, rngsubtype,
    (select typarray from pg_type where oid = rngtypid)
from pg_range r
join pg_type t on t.oid = rngtypid
join pg_namespace ns on ns.oid = typnamespace
where typname = %s and ns.nspname = %s;
""", (tname, schema))

        except ProgrammingError:
            if not conn.autocommit:
                conn.rollback()
            raise
        else:
            rec = curs.fetchone()

            # revert the status of the connection as before the command
            if (conn_status != STATUS_IN_TRANSACTION
                    and not conn.autocommit):
                conn.rollback()

        if not rec:
            raise ProgrammingError(
                f"PostgreSQL type '{name}' not found")

        # note: "type" shadows the builtin for the rest of this scope
        type, subtype, array = rec

        return RangeCaster(name, pyrange,
                           oid=type, subtype_oid=subtype, array_oid=array)

    # Regex matching the text representation of a range, e.g. '[1,10)' or
    # '("a b",)'. Groups: 1 lower flag, 2/3 quoted/unquoted lower bound,
    # 4/5 quoted/unquoted upper bound, 6 upper flag.
    _re_range = re.compile(r"""
        ( \(|\[ )                   # lower bound flag
        (?:                         # lower bound:
          " ( (?: [^"] | "")* ) "   #   - a quoted string
          | ( [^",]+ )              #   - or an unquoted string
        )?                          #   - or empty (not catched)
        ,
        (?:                         # upper bound:
          " ( (?: [^"] | "")* ) "   #   - a quoted string
          | ( [^"\)\]]+ )           #   - or an unquoted string
        )?                          #   - or empty (not catched)
        ( \)|\] )                   # upper bound flag
        """, re.VERBOSE)

    # Undo the doubling of '"' and '\' inside quoted bounds
    _re_undouble = re.compile(r'(["\\])\1')

    def parse(self, s, cur=None):
        """Parse the PostgreSQL text representation *s* into a range object.

        Return None for SQL NULL. If *cur* is given, cast the bounds
        through the subtype typecaster.
        """
        if s is None:
            return None

        if s == 'empty':
            return self.range(empty=True)

        m = self._re_range.match(s)
        if m is None:
            raise InterfaceError(f"failed to parse range: '{s}'")

        # prefer the unquoted form (group 3/5); fall back on the quoted one
        lower = m.group(3)
        if lower is None:
            lower = m.group(2)
            if lower is not None:
                lower = self._re_undouble.sub(r"\1", lower)

        upper = m.group(5)
        if upper is None:
            upper = m.group(4)
            if upper is not None:
                upper = self._re_undouble.sub(r"\1", upper)

        if cur is not None:
            lower = cur.cast(self.subtype_oid, lower)
            upper = cur.cast(self.subtype_oid, upper)

        # inclusive/exclusive flags, e.g. '[)'
        bounds = m.group(1) + m.group(6)

        return self.range(lower, upper, bounds)

    def _register(self, scope=None):
        """Register the typecasters in *scope* (None means globally) and
        the adapter (always global)."""
        register_type(self.typecaster, scope)
        if self.array_typecaster is not None:
            register_type(self.array_typecaster, scope)

        register_adapter(self.range, self.adapter)
+
+
class NumericRange(Range):
    """A `Range` suitable to pass Python numeric types to a PostgreSQL range.

    The PostgreSQL types :sql:`int4range`, :sql:`int8range`, :sql:`numrange`
    are all cast into `!NumericRange` instances.
    """
+
+
class DateRange(Range):
    """A `Range` representing :sql:`daterange` values."""
+
+
class DateTimeRange(Range):
    """A `Range` representing :sql:`tsrange` values."""
+
+
class DateTimeTZRange(Range):
    """A `Range` representing :sql:`tstzrange` values."""
+
+
# Special adaptation for NumericRange. It allows passing number ranges
# regardless of whether the bounds are ints or floats, and of what size the
# ints are, distinctions which are pointless in the Python world. On the way
# back, no numeric range is cast to NumericRange, but only to its subclasses.
+
class NumberRangeAdapter(RangeAdapter):
    """Adapt a range if the subtype doesn't need quotes."""

    def getquoted(self):
        rng = self.adapted
        if rng.isempty:
            return b"'empty'"

        # Not exactly kosher: this relies on the adapted bounds being
        # numbers, whose quoted form is plain ASCII, so the adapters are
        # not prepare()d and the connection encoding is ignored.
        lower = "" if rng.lower_inf \
            else adapt(rng.lower).getquoted().decode('ascii')
        upper = "" if rng.upper_inf \
            else adapt(rng.upper).getquoted().decode('ascii')

        return (f"'{rng._bounds[0]}{lower},{upper}{rng._bounds[1]}'").encode('ascii')
+
+
# TODO: probably won't work with infs, nans and other tricky cases.
register_adapter(NumericRange, NumberRangeAdapter)

# Register globally typecasters and adapters for builtin range types.

# note: the adapter is registered more than once, but this is harmless.
# The oids below are hard-coded values for the builtin range types, their
# subtypes, and their array types (presumably stable well-known oids --
# TODO confirm against the pg_type catalog).
int4range_caster = RangeCaster(NumberRangeAdapter, NumericRange,
    oid=3904, subtype_oid=23, array_oid=3905)
int4range_caster._register()

int8range_caster = RangeCaster(NumberRangeAdapter, NumericRange,
    oid=3926, subtype_oid=20, array_oid=3927)
int8range_caster._register()

numrange_caster = RangeCaster(NumberRangeAdapter, NumericRange,
    oid=3906, subtype_oid=1700, array_oid=3907)
numrange_caster._register()

daterange_caster = RangeCaster('daterange', DateRange,
    oid=3912, subtype_oid=1082, array_oid=3913)
daterange_caster._register()

tsrange_caster = RangeCaster('tsrange', DateTimeRange,
    oid=3908, subtype_oid=1114, array_oid=3909)
tsrange_caster._register()

tstzrange_caster = RangeCaster('tstzrange', DateTimeTZRange,
    oid=3910, subtype_oid=1184, array_oid=3911)
tstzrange_caster._register()
diff --git a/lib/errorcodes.py b/lib/errorcodes.py
new file mode 100644
index 0000000..d511f1c
--- /dev/null
+++ b/lib/errorcodes.py
@@ -0,0 +1,447 @@
+"""Error codes for PostgreSQL
+
+This module contains symbolic names for all PostgreSQL error codes.
+"""
+# psycopg2/errorcodes.py - PostgreSQL error codes
+#
+# Copyright (C) 2006-2019 Johan Dahlin <jdahlin@async.com.br>
+# Copyright (C) 2020-2021 The Psycopg Team
+#
+# psycopg2 is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link this program with the OpenSSL library (or with
+# modified versions of OpenSSL that use the same license as OpenSSL),
+# and distribute linked combinations including the two.
+#
+# You must obey the GNU Lesser General Public License in all respects for
+# all of the code used other than OpenSSL.
+#
+# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+#
+# Based on:
+#
+# https://www.postgresql.org/docs/current/static/errcodes-appendix.html
+#
+
+
def lookup(code, _cache={}):
    """Lookup an error code or class code and return its symbolic name.

    Raise `KeyError` if the code is not found.
    """
    # _cache is a mutable default argument: created once at function
    # definition time and shared across all calls, it acts as a memo table
    # mapping code -> symbolic name.
    if _cache:
        return _cache[code]

    # Generate the lookup map at first usage.
    tmp = {}
    for k, v in globals().items():
        if isinstance(v, str) and len(v) in (2, 5):
            # Strip trailing underscore used to disambiguate duplicate values
            tmp[v] = k.rstrip("_")

    assert tmp

    # Atomic update, to avoid race condition on import (bug #382)
    _cache.update(tmp)

    return _cache[code]
+
+
+# autogenerated data: do not edit below this point.
+
+# Error classes
+CLASS_SUCCESSFUL_COMPLETION = '00'
+CLASS_WARNING = '01'
+CLASS_NO_DATA = '02'
+CLASS_SQL_STATEMENT_NOT_YET_COMPLETE = '03'
+CLASS_CONNECTION_EXCEPTION = '08'
+CLASS_TRIGGERED_ACTION_EXCEPTION = '09'
+CLASS_FEATURE_NOT_SUPPORTED = '0A'
+CLASS_INVALID_TRANSACTION_INITIATION = '0B'
+CLASS_LOCATOR_EXCEPTION = '0F'
+CLASS_INVALID_GRANTOR = '0L'
+CLASS_INVALID_ROLE_SPECIFICATION = '0P'
+CLASS_DIAGNOSTICS_EXCEPTION = '0Z'
+CLASS_CASE_NOT_FOUND = '20'
+CLASS_CARDINALITY_VIOLATION = '21'
+CLASS_DATA_EXCEPTION = '22'
+CLASS_INTEGRITY_CONSTRAINT_VIOLATION = '23'
+CLASS_INVALID_CURSOR_STATE = '24'
+CLASS_INVALID_TRANSACTION_STATE = '25'
+CLASS_INVALID_SQL_STATEMENT_NAME = '26'
+CLASS_TRIGGERED_DATA_CHANGE_VIOLATION = '27'
+CLASS_INVALID_AUTHORIZATION_SPECIFICATION = '28'
+CLASS_DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B'
+CLASS_INVALID_TRANSACTION_TERMINATION = '2D'
+CLASS_SQL_ROUTINE_EXCEPTION = '2F'
+CLASS_INVALID_CURSOR_NAME = '34'
+CLASS_EXTERNAL_ROUTINE_EXCEPTION = '38'
+CLASS_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION = '39'
+CLASS_SAVEPOINT_EXCEPTION = '3B'
+CLASS_INVALID_CATALOG_NAME = '3D'
+CLASS_INVALID_SCHEMA_NAME = '3F'
+CLASS_TRANSACTION_ROLLBACK = '40'
+CLASS_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION = '42'
+CLASS_WITH_CHECK_OPTION_VIOLATION = '44'
+CLASS_INSUFFICIENT_RESOURCES = '53'
+CLASS_PROGRAM_LIMIT_EXCEEDED = '54'
+CLASS_OBJECT_NOT_IN_PREREQUISITE_STATE = '55'
+CLASS_OPERATOR_INTERVENTION = '57'
+CLASS_SYSTEM_ERROR = '58'
+CLASS_SNAPSHOT_FAILURE = '72'
+CLASS_CONFIGURATION_FILE_ERROR = 'F0'
+CLASS_FOREIGN_DATA_WRAPPER_ERROR = 'HV'
+CLASS_PL_PGSQL_ERROR = 'P0'
+CLASS_INTERNAL_ERROR = 'XX'
+
+# Class 00 - Successful Completion
+SUCCESSFUL_COMPLETION = '00000'
+
+# Class 01 - Warning
+WARNING = '01000'
+NULL_VALUE_ELIMINATED_IN_SET_FUNCTION = '01003'
+STRING_DATA_RIGHT_TRUNCATION_ = '01004'
+PRIVILEGE_NOT_REVOKED = '01006'
+PRIVILEGE_NOT_GRANTED = '01007'
+IMPLICIT_ZERO_BIT_PADDING = '01008'
+DYNAMIC_RESULT_SETS_RETURNED = '0100C'
+DEPRECATED_FEATURE = '01P01'
+
+# Class 02 - No Data (this is also a warning class per the SQL standard)
+NO_DATA = '02000'
+NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED = '02001'
+
+# Class 03 - SQL Statement Not Yet Complete
+SQL_STATEMENT_NOT_YET_COMPLETE = '03000'
+
+# Class 08 - Connection Exception
+CONNECTION_EXCEPTION = '08000'
+SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION = '08001'
+CONNECTION_DOES_NOT_EXIST = '08003'
+SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION = '08004'
+CONNECTION_FAILURE = '08006'
+TRANSACTION_RESOLUTION_UNKNOWN = '08007'
+PROTOCOL_VIOLATION = '08P01'
+
+# Class 09 - Triggered Action Exception
+TRIGGERED_ACTION_EXCEPTION = '09000'
+
+# Class 0A - Feature Not Supported
+FEATURE_NOT_SUPPORTED = '0A000'
+
+# Class 0B - Invalid Transaction Initiation
+INVALID_TRANSACTION_INITIATION = '0B000'
+
+# Class 0F - Locator Exception
+LOCATOR_EXCEPTION = '0F000'
+INVALID_LOCATOR_SPECIFICATION = '0F001'
+
+# Class 0L - Invalid Grantor
+INVALID_GRANTOR = '0L000'
+INVALID_GRANT_OPERATION = '0LP01'
+
+# Class 0P - Invalid Role Specification
+INVALID_ROLE_SPECIFICATION = '0P000'
+
+# Class 0Z - Diagnostics Exception
+DIAGNOSTICS_EXCEPTION = '0Z000'
+STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER = '0Z002'
+
+# Class 20 - Case Not Found
+CASE_NOT_FOUND = '20000'
+
+# Class 21 - Cardinality Violation
+CARDINALITY_VIOLATION = '21000'
+
+# Class 22 - Data Exception
+DATA_EXCEPTION = '22000'
+STRING_DATA_RIGHT_TRUNCATION = '22001'
+NULL_VALUE_NO_INDICATOR_PARAMETER = '22002'
+NUMERIC_VALUE_OUT_OF_RANGE = '22003'
+NULL_VALUE_NOT_ALLOWED_ = '22004'
+ERROR_IN_ASSIGNMENT = '22005'
+INVALID_DATETIME_FORMAT = '22007'
+DATETIME_FIELD_OVERFLOW = '22008'
+INVALID_TIME_ZONE_DISPLACEMENT_VALUE = '22009'
+ESCAPE_CHARACTER_CONFLICT = '2200B'
+INVALID_USE_OF_ESCAPE_CHARACTER = '2200C'
+INVALID_ESCAPE_OCTET = '2200D'
+ZERO_LENGTH_CHARACTER_STRING = '2200F'
+MOST_SPECIFIC_TYPE_MISMATCH = '2200G'
+SEQUENCE_GENERATOR_LIMIT_EXCEEDED = '2200H'
+NOT_AN_XML_DOCUMENT = '2200L'
+INVALID_XML_DOCUMENT = '2200M'
+INVALID_XML_CONTENT = '2200N'
+INVALID_XML_COMMENT = '2200S'
+INVALID_XML_PROCESSING_INSTRUCTION = '2200T'
+INVALID_INDICATOR_PARAMETER_VALUE = '22010'
+SUBSTRING_ERROR = '22011'
+DIVISION_BY_ZERO = '22012'
+INVALID_PRECEDING_OR_FOLLOWING_SIZE = '22013'
+INVALID_ARGUMENT_FOR_NTILE_FUNCTION = '22014'
+INTERVAL_FIELD_OVERFLOW = '22015'
+INVALID_ARGUMENT_FOR_NTH_VALUE_FUNCTION = '22016'
+INVALID_CHARACTER_VALUE_FOR_CAST = '22018'
+INVALID_ESCAPE_CHARACTER = '22019'
+INVALID_REGULAR_EXPRESSION = '2201B'
+INVALID_ARGUMENT_FOR_LOGARITHM = '2201E'
+INVALID_ARGUMENT_FOR_POWER_FUNCTION = '2201F'
+INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION = '2201G'
+INVALID_ROW_COUNT_IN_LIMIT_CLAUSE = '2201W'
+INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE = '2201X'
+INVALID_LIMIT_VALUE = '22020'
+CHARACTER_NOT_IN_REPERTOIRE = '22021'
+INDICATOR_OVERFLOW = '22022'
+INVALID_PARAMETER_VALUE = '22023'
+UNTERMINATED_C_STRING = '22024'
+INVALID_ESCAPE_SEQUENCE = '22025'
+STRING_DATA_LENGTH_MISMATCH = '22026'
+TRIM_ERROR = '22027'
+ARRAY_SUBSCRIPT_ERROR = '2202E'
+INVALID_TABLESAMPLE_REPEAT = '2202G'
+INVALID_TABLESAMPLE_ARGUMENT = '2202H'
+DUPLICATE_JSON_OBJECT_KEY_VALUE = '22030'
+INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION = '22031'
+INVALID_JSON_TEXT = '22032'
+INVALID_SQL_JSON_SUBSCRIPT = '22033'
+MORE_THAN_ONE_SQL_JSON_ITEM = '22034'
+NO_SQL_JSON_ITEM = '22035'
+NON_NUMERIC_SQL_JSON_ITEM = '22036'
+NON_UNIQUE_KEYS_IN_A_JSON_OBJECT = '22037'
+SINGLETON_SQL_JSON_ITEM_REQUIRED = '22038'
+SQL_JSON_ARRAY_NOT_FOUND = '22039'
+SQL_JSON_MEMBER_NOT_FOUND = '2203A'
+SQL_JSON_NUMBER_NOT_FOUND = '2203B'
+SQL_JSON_OBJECT_NOT_FOUND = '2203C'
+TOO_MANY_JSON_ARRAY_ELEMENTS = '2203D'
+TOO_MANY_JSON_OBJECT_MEMBERS = '2203E'
+SQL_JSON_SCALAR_REQUIRED = '2203F'
+FLOATING_POINT_EXCEPTION = '22P01'
+INVALID_TEXT_REPRESENTATION = '22P02'
+INVALID_BINARY_REPRESENTATION = '22P03'
+BAD_COPY_FILE_FORMAT = '22P04'
+UNTRANSLATABLE_CHARACTER = '22P05'
+NONSTANDARD_USE_OF_ESCAPE_CHARACTER = '22P06'
+
+# Class 23 - Integrity Constraint Violation
+INTEGRITY_CONSTRAINT_VIOLATION = '23000'
+RESTRICT_VIOLATION = '23001'
+NOT_NULL_VIOLATION = '23502'
+FOREIGN_KEY_VIOLATION = '23503'
+UNIQUE_VIOLATION = '23505'
+CHECK_VIOLATION = '23514'
+EXCLUSION_VIOLATION = '23P01'
+
+# Class 24 - Invalid Cursor State
+INVALID_CURSOR_STATE = '24000'
+
+# Class 25 - Invalid Transaction State
+INVALID_TRANSACTION_STATE = '25000'
+ACTIVE_SQL_TRANSACTION = '25001'
+BRANCH_TRANSACTION_ALREADY_ACTIVE = '25002'
+INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION = '25003'
+INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION = '25004'
+NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION = '25005'
+READ_ONLY_SQL_TRANSACTION = '25006'
+SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED = '25007'
+HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL = '25008'
+NO_ACTIVE_SQL_TRANSACTION = '25P01'
+IN_FAILED_SQL_TRANSACTION = '25P02'
+IDLE_IN_TRANSACTION_SESSION_TIMEOUT = '25P03'
+
+# Class 26 - Invalid SQL Statement Name
+INVALID_SQL_STATEMENT_NAME = '26000'
+
+# Class 27 - Triggered Data Change Violation
+TRIGGERED_DATA_CHANGE_VIOLATION = '27000'
+
+# Class 28 - Invalid Authorization Specification
+INVALID_AUTHORIZATION_SPECIFICATION = '28000'
+INVALID_PASSWORD = '28P01'
+
+# Class 2B - Dependent Privilege Descriptors Still Exist
+DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B000'
+DEPENDENT_OBJECTS_STILL_EXIST = '2BP01'
+
+# Class 2D - Invalid Transaction Termination
+INVALID_TRANSACTION_TERMINATION = '2D000'
+
+# Class 2F - SQL Routine Exception
+SQL_ROUTINE_EXCEPTION = '2F000'
+MODIFYING_SQL_DATA_NOT_PERMITTED_ = '2F002'
+PROHIBITED_SQL_STATEMENT_ATTEMPTED_ = '2F003'
+READING_SQL_DATA_NOT_PERMITTED_ = '2F004'
+FUNCTION_EXECUTED_NO_RETURN_STATEMENT = '2F005'
+
+# Class 34 - Invalid Cursor Name
+INVALID_CURSOR_NAME = '34000'
+
+# Class 38 - External Routine Exception
+EXTERNAL_ROUTINE_EXCEPTION = '38000'
+CONTAINING_SQL_NOT_PERMITTED = '38001'
+MODIFYING_SQL_DATA_NOT_PERMITTED = '38002'
+PROHIBITED_SQL_STATEMENT_ATTEMPTED = '38003'
+READING_SQL_DATA_NOT_PERMITTED = '38004'
+
+# Class 39 - External Routine Invocation Exception
+EXTERNAL_ROUTINE_INVOCATION_EXCEPTION = '39000'
+INVALID_SQLSTATE_RETURNED = '39001'
+NULL_VALUE_NOT_ALLOWED = '39004'
+TRIGGER_PROTOCOL_VIOLATED = '39P01'
+SRF_PROTOCOL_VIOLATED = '39P02'
+EVENT_TRIGGER_PROTOCOL_VIOLATED = '39P03'
+
+# Class 3B - Savepoint Exception
+SAVEPOINT_EXCEPTION = '3B000'
+INVALID_SAVEPOINT_SPECIFICATION = '3B001'
+
+# Class 3D - Invalid Catalog Name
+INVALID_CATALOG_NAME = '3D000'
+
+# Class 3F - Invalid Schema Name
+INVALID_SCHEMA_NAME = '3F000'
+
+# Class 40 - Transaction Rollback
+TRANSACTION_ROLLBACK = '40000'
+SERIALIZATION_FAILURE = '40001'
+TRANSACTION_INTEGRITY_CONSTRAINT_VIOLATION = '40002'
+STATEMENT_COMPLETION_UNKNOWN = '40003'
+DEADLOCK_DETECTED = '40P01'
+
+# Class 42 - Syntax Error or Access Rule Violation
+SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION = '42000'
+INSUFFICIENT_PRIVILEGE = '42501'
+SYNTAX_ERROR = '42601'
+INVALID_NAME = '42602'
+INVALID_COLUMN_DEFINITION = '42611'
+NAME_TOO_LONG = '42622'
+DUPLICATE_COLUMN = '42701'
+AMBIGUOUS_COLUMN = '42702'
+UNDEFINED_COLUMN = '42703'
+UNDEFINED_OBJECT = '42704'
+DUPLICATE_OBJECT = '42710'
+DUPLICATE_ALIAS = '42712'
+DUPLICATE_FUNCTION = '42723'
+AMBIGUOUS_FUNCTION = '42725'
+GROUPING_ERROR = '42803'
+DATATYPE_MISMATCH = '42804'
+WRONG_OBJECT_TYPE = '42809'
+INVALID_FOREIGN_KEY = '42830'
+CANNOT_COERCE = '42846'
+UNDEFINED_FUNCTION = '42883'
+GENERATED_ALWAYS = '428C9'
+RESERVED_NAME = '42939'
+UNDEFINED_TABLE = '42P01'
+UNDEFINED_PARAMETER = '42P02'
+DUPLICATE_CURSOR = '42P03'
+DUPLICATE_DATABASE = '42P04'
+DUPLICATE_PREPARED_STATEMENT = '42P05'
+DUPLICATE_SCHEMA = '42P06'
+DUPLICATE_TABLE = '42P07'
+AMBIGUOUS_PARAMETER = '42P08'
+AMBIGUOUS_ALIAS = '42P09'
+INVALID_COLUMN_REFERENCE = '42P10'
+INVALID_CURSOR_DEFINITION = '42P11'
+INVALID_DATABASE_DEFINITION = '42P12'
+INVALID_FUNCTION_DEFINITION = '42P13'
+INVALID_PREPARED_STATEMENT_DEFINITION = '42P14'
+INVALID_SCHEMA_DEFINITION = '42P15'
+INVALID_TABLE_DEFINITION = '42P16'
+INVALID_OBJECT_DEFINITION = '42P17'
+INDETERMINATE_DATATYPE = '42P18'
+INVALID_RECURSION = '42P19'
+WINDOWING_ERROR = '42P20'
+COLLATION_MISMATCH = '42P21'
+INDETERMINATE_COLLATION = '42P22'
+
+# Class 44 - WITH CHECK OPTION Violation
+WITH_CHECK_OPTION_VIOLATION = '44000'
+
+# Class 53 - Insufficient Resources
+INSUFFICIENT_RESOURCES = '53000'
+DISK_FULL = '53100'
+OUT_OF_MEMORY = '53200'
+TOO_MANY_CONNECTIONS = '53300'
+CONFIGURATION_LIMIT_EXCEEDED = '53400'
+
+# Class 54 - Program Limit Exceeded
+PROGRAM_LIMIT_EXCEEDED = '54000'
+STATEMENT_TOO_COMPLEX = '54001'
+TOO_MANY_COLUMNS = '54011'
+TOO_MANY_ARGUMENTS = '54023'
+
+# Class 55 - Object Not In Prerequisite State
+OBJECT_NOT_IN_PREREQUISITE_STATE = '55000'
+OBJECT_IN_USE = '55006'
+CANT_CHANGE_RUNTIME_PARAM = '55P02'
+LOCK_NOT_AVAILABLE = '55P03'
+UNSAFE_NEW_ENUM_VALUE_USAGE = '55P04'
+
+# Class 57 - Operator Intervention
+OPERATOR_INTERVENTION = '57000'
+QUERY_CANCELED = '57014'
+ADMIN_SHUTDOWN = '57P01'
+CRASH_SHUTDOWN = '57P02'
+CANNOT_CONNECT_NOW = '57P03'
+DATABASE_DROPPED = '57P04'
+
+# Class 58 - System Error (errors external to PostgreSQL itself)
+SYSTEM_ERROR = '58000'
+IO_ERROR = '58030'
+UNDEFINED_FILE = '58P01'
+DUPLICATE_FILE = '58P02'
+
+# Class 72 - Snapshot Failure
+SNAPSHOT_TOO_OLD = '72000'
+
+# Class F0 - Configuration File Error
+CONFIG_FILE_ERROR = 'F0000'
+LOCK_FILE_EXISTS = 'F0001'
+
+# Class HV - Foreign Data Wrapper Error (SQL/MED)
+FDW_ERROR = 'HV000'
+FDW_OUT_OF_MEMORY = 'HV001'
+FDW_DYNAMIC_PARAMETER_VALUE_NEEDED = 'HV002'
+FDW_INVALID_DATA_TYPE = 'HV004'
+FDW_COLUMN_NAME_NOT_FOUND = 'HV005'
+FDW_INVALID_DATA_TYPE_DESCRIPTORS = 'HV006'
+FDW_INVALID_COLUMN_NAME = 'HV007'
+FDW_INVALID_COLUMN_NUMBER = 'HV008'
+FDW_INVALID_USE_OF_NULL_POINTER = 'HV009'
+FDW_INVALID_STRING_FORMAT = 'HV00A'
+FDW_INVALID_HANDLE = 'HV00B'
+FDW_INVALID_OPTION_INDEX = 'HV00C'
+FDW_INVALID_OPTION_NAME = 'HV00D'
+FDW_OPTION_NAME_NOT_FOUND = 'HV00J'
+FDW_REPLY_HANDLE = 'HV00K'
+FDW_UNABLE_TO_CREATE_EXECUTION = 'HV00L'
+FDW_UNABLE_TO_CREATE_REPLY = 'HV00M'
+FDW_UNABLE_TO_ESTABLISH_CONNECTION = 'HV00N'
+FDW_NO_SCHEMAS = 'HV00P'
+FDW_SCHEMA_NOT_FOUND = 'HV00Q'
+FDW_TABLE_NOT_FOUND = 'HV00R'
+FDW_FUNCTION_SEQUENCE_ERROR = 'HV010'
+FDW_TOO_MANY_HANDLES = 'HV014'
+FDW_INCONSISTENT_DESCRIPTOR_INFORMATION = 'HV021'
+FDW_INVALID_ATTRIBUTE_VALUE = 'HV024'
+FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH = 'HV090'
+FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER = 'HV091'
+
+# Class P0 - PL/pgSQL Error
+PLPGSQL_ERROR = 'P0000'
+RAISE_EXCEPTION = 'P0001'
+NO_DATA_FOUND = 'P0002'
+TOO_MANY_ROWS = 'P0003'
+ASSERT_FAILURE = 'P0004'
+
+# Class XX - Internal Error
+INTERNAL_ERROR = 'XX000'
+DATA_CORRUPTED = 'XX001'
+INDEX_CORRUPTED = 'XX002'
diff --git a/lib/errors.py b/lib/errors.py
new file mode 100644
index 0000000..e4e47f5
--- /dev/null
+++ b/lib/errors.py
@@ -0,0 +1,38 @@
+"""Error classes for PostgreSQL error codes
+"""
+
+# psycopg/errors.py - SQLSTATE and DB-API exceptions
+#
+# Copyright (C) 2018-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
+# Copyright (C) 2020-2021 The Psycopg Team
+#
+# psycopg2 is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link this program with the OpenSSL library (or with
+# modified versions of OpenSSL that use the same license as OpenSSL),
+# and distribute linked combinations including the two.
+#
+# You must obey the GNU Lesser General Public License in all respects for
+# all of the code used other than OpenSSL.
+#
+# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+#
+# NOTE: the exceptions are injected into this module by the C extention.
+#
+
+
def lookup(code):
    """Lookup an error code and return its exception class.

    Raise `!KeyError` if the code is not found.
    """
    # Deferred import to avoid a circular import: the exceptions are
    # injected into this module by the C extension.
    from psycopg2._psycopg import sqlstate_errors
    return sqlstate_errors[code]
diff --git a/lib/extensions.py b/lib/extensions.py
new file mode 100644
index 0000000..b938d0c
--- /dev/null
+++ b/lib/extensions.py
@@ -0,0 +1,213 @@
+"""psycopg extensions to the DBAPI-2.0
+
+This module holds all the extensions to the DBAPI-2.0 provided by psycopg.
+
+- `connection` -- the new-type inheritable connection class
+- `cursor` -- the new-type inheritable cursor class
+- `lobject` -- the new-type inheritable large object class
+- `adapt()` -- exposes the PEP-246_ compatible adapting mechanism used
+ by psycopg to adapt Python types to PostgreSQL ones
+
+.. _PEP-246: https://www.python.org/dev/peps/pep-0246/
+"""
+# psycopg/extensions.py - DBAPI-2.0 extensions specific to psycopg
+#
+# Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+# Copyright (C) 2020-2021 The Psycopg Team
+#
+# psycopg2 is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link this program with the OpenSSL library (or with
+# modified versions of OpenSSL that use the same license as OpenSSL),
+# and distribute linked combinations including the two.
+#
+# You must obey the GNU Lesser General Public License in all respects for
+# all of the code used other than OpenSSL.
+#
+# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+import re as _re
+
+from psycopg2._psycopg import ( # noqa
+ BINARYARRAY, BOOLEAN, BOOLEANARRAY, BYTES, BYTESARRAY, DATE, DATEARRAY,
+ DATETIMEARRAY, DECIMAL, DECIMALARRAY, FLOAT, FLOATARRAY, INTEGER,
+ INTEGERARRAY, INTERVAL, INTERVALARRAY, LONGINTEGER, LONGINTEGERARRAY,
+ ROWIDARRAY, STRINGARRAY, TIME, TIMEARRAY, UNICODE, UNICODEARRAY,
+ AsIs, Binary, Boolean, Float, Int, QuotedString, )
+
+from psycopg2._psycopg import ( # noqa
+ PYDATE, PYDATETIME, PYDATETIMETZ, PYINTERVAL, PYTIME, PYDATEARRAY,
+ PYDATETIMEARRAY, PYDATETIMETZARRAY, PYINTERVALARRAY, PYTIMEARRAY,
+ DateFromPy, TimeFromPy, TimestampFromPy, IntervalFromPy, )
+
+from psycopg2._psycopg import ( # noqa
+ adapt, adapters, encodings, connection, cursor,
+ lobject, Xid, libpq_version, parse_dsn, quote_ident,
+ string_types, binary_types, new_type, new_array_type, register_type,
+ ISQLQuote, Notify, Diagnostics, Column, ConnectionInfo,
+ QueryCanceledError, TransactionRollbackError,
+ set_wait_callback, get_wait_callback, encrypt_password, )
+
+
+"""Isolation level values."""
+ISOLATION_LEVEL_AUTOCOMMIT = 0
+ISOLATION_LEVEL_READ_UNCOMMITTED = 4
+ISOLATION_LEVEL_READ_COMMITTED = 1
+ISOLATION_LEVEL_REPEATABLE_READ = 2
+ISOLATION_LEVEL_SERIALIZABLE = 3
+ISOLATION_LEVEL_DEFAULT = None
+
+
+"""psycopg connection status values."""
+STATUS_SETUP = 0
+STATUS_READY = 1
+STATUS_BEGIN = 2
+STATUS_SYNC = 3 # currently unused
+STATUS_ASYNC = 4 # currently unused
+STATUS_PREPARED = 5
+
+# This is a useful mnemonic to check if the connection is in a transaction
+STATUS_IN_TRANSACTION = STATUS_BEGIN
+
+
+"""psycopg asynchronous connection polling values"""
+POLL_OK = 0
+POLL_READ = 1
+POLL_WRITE = 2
+POLL_ERROR = 3
+
+
+"""Backend transaction status values."""
+TRANSACTION_STATUS_IDLE = 0
+TRANSACTION_STATUS_ACTIVE = 1
+TRANSACTION_STATUS_INTRANS = 2
+TRANSACTION_STATUS_INERROR = 3
+TRANSACTION_STATUS_UNKNOWN = 4
+
+
def register_adapter(typ, callable):
    """Register 'callable' as an ISQLQuote adapter for type 'typ'."""
    # adapt() looks adapters up by the (type, protocol) pair.
    adapters[typ, ISQLQuote] = callable
+
+
# The SQL_IN class is the official adapter for tuples starting from 2.0.6.
class SQL_IN:
    """Adapt any iterable to an SQL quotable object."""

    def __init__(self, seq):
        self._seq = seq
        self._conn = None

    def prepare(self, conn):
        self._conn = conn

    def getquoted(self):
        # The key step: adapt every item of the sequence, then ask each
        # adapted object for its quoted representation.
        adapted = [adapt(item) for item in self._seq]
        if self._conn is not None:
            for wrapper in adapted:
                if hasattr(wrapper, 'prepare'):
                    wrapper.prepare(self._conn)
        quoted = [wrapper.getquoted() for wrapper in adapted]
        return b'(' + b', '.join(quoted) + b')'

    def __str__(self):
        return str(self.getquoted())
+
+
class NoneAdapter:
    """Adapt None to NULL.

    Normally unused, because a fast path in mogrify emits NULL directly,
    but it makes adapting composite types easier.
    """

    def __init__(self, obj):
        # The adapted object is always None: nothing worth storing.
        pass

    def getquoted(self, _null=b"NULL"):
        return _null
+
+
def make_dsn(dsn=None, **kwargs):
    """Convert a set of keyword arguments into a connection string.

    If *dsn* is specified too, its parameters are merged with (and
    overridden by) the keyword arguments. The resulting string is
    validated with `parse_dsn()` before being returned.
    """
    # (docstring fixed: previously read "into a connection strings")
    if dsn is None and not kwargs:
        return ''

    # If no kwarg is specified don't mung the dsn, but verify it
    if not kwargs:
        parse_dsn(dsn)
        return dsn

    # 'database' is accepted as an alias of the libpq 'dbname' keyword
    if 'database' in kwargs:
        if 'dbname' in kwargs:
            raise TypeError(
                "you can't specify both 'database' and 'dbname' arguments")
        kwargs['dbname'] = kwargs.pop('database')

    # Drop the None arguments
    kwargs = {k: v for (k, v) in kwargs.items() if v is not None}

    # Merge the dsn parameters in, keywords taking precedence
    if dsn is not None:
        tmp = parse_dsn(dsn)
        tmp.update(kwargs)
        kwargs = tmp

    dsn = " ".join(["{}={}".format(k, _param_escape(str(v)))
                    for (k, v) in kwargs.items()])

    # verify that the returned dsn is valid
    parse_dsn(dsn)

    return dsn
+
+
+def _param_escape(s,
+ re_escape=_re.compile(r"([\\'])"),
+ re_space=_re.compile(r'\s')):
+ """
+ Apply the escaping rule required by PQconnectdb
+ """
+ if not s:
+ return "''"
+
+ s = re_escape.sub(r'\\\1', s)
+ if re_space.search(s):
+ s = "'" + s + "'"
+
+ return s
+
+
# Create default json typecasters for PostgreSQL 9.2 oids
from psycopg2._json import register_default_json, register_default_jsonb  # noqa

try:
    JSON, JSONARRAY = register_default_json()
    JSONB, JSONBARRAY = register_default_jsonb()
except ImportError:
    pass

del register_default_json, register_default_jsonb


# Create default Range typecasters: importing the module registers them as
# a side effect; Range itself is not needed here.
# (fixed the stray space in "psycopg2. _range" -- valid syntax, but a typo)
from psycopg2._range import Range  # noqa
del Range


# Add the "cleaned" version of the encodings to the key.
# When the encoding is set its name is cleaned up from - and _ and turned
# uppercase, so an encoding not respecting these rules wouldn't be found in the
# encodings keys and would raise an exception with the unicode typecaster
for k, v in list(encodings.items()):
    k = k.replace('_', '').replace('-', '').upper()
    encodings[k] = v

del k, v
diff --git a/lib/extras.py b/lib/extras.py
new file mode 100644
index 0000000..f921d2d
--- /dev/null
+++ b/lib/extras.py
@@ -0,0 +1,1306 @@
+"""Miscellaneous goodies for psycopg2
+
+This module is a generic place used to hold little helper functions
+and classes until a better place in the distribution is found.
+"""
+# psycopg/extras.py - miscellaneous extra goodies for psycopg
+#
+# Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+# Copyright (C) 2020-2021 The Psycopg Team
+#
+# psycopg2 is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link this program with the OpenSSL library (or with
+# modified versions of OpenSSL that use the same license as OpenSSL),
+# and distribute linked combinations including the two.
+#
+# You must obey the GNU Lesser General Public License in all respects for
+# all of the code used other than OpenSSL.
+#
+# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+import os as _os
+import time as _time
+import re as _re
+from collections import namedtuple, OrderedDict
+
+import logging as _logging
+
+import psycopg2
+from psycopg2 import extensions as _ext
+from .extensions import cursor as _cursor
+from .extensions import connection as _connection
+from .extensions import adapt as _A, quote_ident
+from functools import lru_cache
+
+from psycopg2._psycopg import ( # noqa
+ REPLICATION_PHYSICAL, REPLICATION_LOGICAL,
+ ReplicationConnection as _replicationConnection,
+ ReplicationCursor as _replicationCursor,
+ ReplicationMessage)
+
+
+# expose the json adaptation stuff into the module
+from psycopg2._json import ( # noqa
+ json, Json, register_json, register_default_json, register_default_jsonb)
+
+
+# Expose range-related objects
+from psycopg2._range import ( # noqa
+ Range, NumericRange, DateRange, DateTimeRange, DateTimeTZRange,
+ register_range, RangeAdapter, RangeCaster)
+
+
+# Expose ipaddress-related objects
+from psycopg2._ipaddress import register_ipaddress # noqa
+
+
class DictCursorBase(_cursor):
    """Base class for all dict-like cursors.

    Subclasses must pass a `!row_factory` keyword argument and provide a
    `!_build_index()` method mapping column names to positions.
    """

    def __init__(self, *args, **kwargs):
        if 'row_factory' in kwargs:
            row_factory = kwargs['row_factory']
            del kwargs['row_factory']
        else:
            raise NotImplementedError(
                "DictCursorBase can't be instantiated without a row factory.")
        super().__init__(*args, **kwargs)
        # Set to True by execute()/callproc() in subclasses: tells the fetch
        # methods that the name -> position index must be (re)built.
        self._query_executed = False
        # When True, rows are fetched *before* the index is built -- presumably
        # because the row factory keeps a live reference to the index object
        # (DictRow stores cursor.index); see DictCursor, which enables it.
        self._prefetch = False
        self.row_factory = row_factory

    def fetchone(self):
        # The fetch/build order depends on the prefetch mode; see __init__.
        if self._prefetch:
            res = super().fetchone()
        if self._query_executed:
            self._build_index()
        if not self._prefetch:
            res = super().fetchone()
        return res

    def fetchmany(self, size=None):
        if self._prefetch:
            res = super().fetchmany(size)
        if self._query_executed:
            self._build_index()
        if not self._prefetch:
            res = super().fetchmany(size)
        return res

    def fetchall(self):
        if self._prefetch:
            res = super().fetchall()
        if self._query_executed:
            self._build_index()
        if not self._prefetch:
            res = super().fetchall()
        return res

    def __iter__(self):
        try:
            # The first row is fetched eagerly so _build_index() can run at
            # the right moment; remaining rows are yielded lazily.
            if self._prefetch:
                res = super().__iter__()
                first = next(res)
            if self._query_executed:
                self._build_index()
            if not self._prefetch:
                res = super().__iter__()
                first = next(res)

            yield first
            while True:
                yield next(res)
        except StopIteration:
            return
+
+
class DictConnection(_connection):
    """A connection that uses `DictCursor` automatically."""
    def cursor(self, *args, **kwargs):
        # Respect an explicit cursor_factory argument; otherwise fall back
        # to the connection's configured factory, then to DictCursor.
        factory = self.cursor_factory or DictCursor
        kwargs.setdefault('cursor_factory', factory)
        return super().cursor(*args, **kwargs)
+
+
class DictCursor(DictCursorBase):
    """A cursor that keeps a list of column name -> index mappings__.

    .. __: https://docs.python.org/glossary.html#term-mapping
    """

    def __init__(self, *args, **kwargs):
        kwargs['row_factory'] = DictRow
        super().__init__(*args, **kwargs)
        # DictRow keeps a reference to the index, so rows are fetched first
        # and the index is filled in afterwards.
        self._prefetch = True

    def execute(self, query, vars=None):
        self.index = OrderedDict()
        self._query_executed = True
        return super().execute(query, vars)

    def callproc(self, procname, vars=None):
        self.index = OrderedDict()
        self._query_executed = True
        return super().callproc(procname, vars)

    def _build_index(self):
        # Map each column name to its position in the result row.
        if self._query_executed and self.description:
            for pos, col in enumerate(self.description):
                self.index[col[0]] = pos
            self._query_executed = False
+
+
class DictRow(list):
    """A row object that allows by-column-name access to its data."""

    __slots__ = ('_index',)

    def __init__(self, cursor):
        # Keep a reference to the cursor's shared name -> position mapping
        # and pre-size the underlying list to the number of columns.
        self._index = cursor.index
        self[:] = [None] * len(cursor.description)

    def _resolve(self, key):
        # Translate a column name into a positional index; ints and slices
        # are passed through untouched.
        if isinstance(key, (int, slice)):
            return key
        return self._index[key]

    def __getitem__(self, x):
        return super().__getitem__(self._resolve(x))

    def __setitem__(self, x, v):
        super().__setitem__(self._resolve(x), v)

    def items(self):
        get = super().__getitem__
        for name, pos in self._index.items():
            yield name, get(pos)

    def keys(self):
        return iter(self._index)

    def values(self):
        get = super().__getitem__
        for pos in self._index.values():
            yield get(pos)

    def get(self, x, default=None):
        try:
            return self[x]
        except Exception:
            return default

    def copy(self):
        return OrderedDict(self.items())

    def __contains__(self, x):
        return x in self._index

    def __reduce__(self):
        # this is apparently useless, but it fixes #1073
        return super().__reduce__()

    def __getstate__(self):
        return self[:], self._index.copy()

    def __setstate__(self, data):
        self[:] = data[0]
        self._index = data[1]
+
+
class RealDictConnection(_connection):
    """A connection that uses `RealDictCursor` automatically."""
    def cursor(self, *args, **kwargs):
        # An explicit cursor_factory argument wins; otherwise use the
        # connection's configured factory, defaulting to RealDictCursor.
        factory = self.cursor_factory or RealDictCursor
        kwargs.setdefault('cursor_factory', factory)
        return super().cursor(*args, **kwargs)
+
+
class RealDictCursor(DictCursorBase):
    """A cursor that uses a real dict as the base type for rows.

    Note that this cursor is extremely specialized and does not allow
    the normal access (using integer indices) to fetched data. If you need
    to access database rows both as a dictionary and a list, then use
    the generic `DictCursor` instead of `!RealDictCursor`.
    """
    def __init__(self, *args, **kwargs):
        kwargs['row_factory'] = RealDictRow
        super().__init__(*args, **kwargs)

    def execute(self, query, vars=None):
        self._start_query()
        return super().execute(query, vars)

    def callproc(self, procname, vars=None):
        self._start_query()
        return super().callproc(procname, vars)

    def _start_query(self):
        # Reset the column names list, rebuilt by _build_index() once the
        # statement has been executed.
        self.column_mapping = []
        self._query_executed = True

    def _build_index(self):
        if self._query_executed and self.description:
            self.column_mapping = [d[0] for d in self.description]
            self._query_executed = False
+
+
class RealDictRow(OrderedDict):
    """A `!dict` subclass representing a data record."""

    def __init__(self, *args, **kwargs):
        # When created by RealDictCursor the first argument is the cursor
        # itself; plain dict-style construction is also supported (e.g. for
        # unpickling).
        if args and isinstance(args[0], _cursor):
            cursor = args[0]
            args = args[1:]
        else:
            cursor = None

        super().__init__(*args, **kwargs)

        if cursor is not None:
            # Required for named cursors
            if cursor.description and not cursor.column_mapping:
                cursor._build_index()

            # Store the cols mapping in the dict itself until the row is fully
            # populated, so we don't need to add attributes to the class
            # (hence keeping its maintenance, special pickle support, etc.)
            self[RealDictRow] = cursor.column_mapping

    def __setitem__(self, key, value):
        # While the RealDictRow sentinel key is present the row is being
        # built: keys are *positions* (ints), translated here into column
        # names through the stored mapping.
        if RealDictRow in self:
            # We are in the row building phase
            mapping = self[RealDictRow]
            super().__setitem__(mapping[key], value)
            if key == len(mapping) - 1:
                # Row building finished
                del self[RealDictRow]
            return

        super().__setitem__(key, value)
+
+
class NamedTupleConnection(_connection):
    """A connection that uses `NamedTupleCursor` automatically."""
    def cursor(self, *args, **kwargs):
        # An explicit cursor_factory argument wins; otherwise use the
        # connection's configured factory, defaulting to NamedTupleCursor.
        factory = self.cursor_factory or NamedTupleCursor
        kwargs.setdefault('cursor_factory', factory)
        return super().cursor(*args, **kwargs)
+
+
class NamedTupleCursor(_cursor):
    """A cursor that generates results as `~collections.namedtuple`.

    `!fetch*()` methods will return named tuples instead of regular tuples, so
    their elements can be accessed both as regular numeric items as well as
    attributes.

    >>> nt_cur = conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
    >>> rec = nt_cur.fetchone()
    >>> rec
    Record(id=1, num=100, data="abc'def")
    >>> rec[1]
    100
    >>> rec.data
    "abc'def"
    """
    # Cached namedtuple class for the current result set; invalidated by
    # execute()/executemany()/callproc() and lazily rebuilt on first fetch.
    Record = None
    MAX_CACHE = 1024

    def execute(self, query, vars=None):
        self.Record = None
        return super().execute(query, vars)

    def executemany(self, query, vars):
        self.Record = None
        return super().executemany(query, vars)

    def callproc(self, procname, vars=None):
        self.Record = None
        return super().callproc(procname, vars)

    def fetchone(self):
        # Returns None (implicitly) when the result set is exhausted.
        t = super().fetchone()
        if t is not None:
            nt = self.Record
            if nt is None:
                nt = self.Record = self._make_nt()
            return nt._make(t)

    def fetchmany(self, size=None):
        ts = super().fetchmany(size)
        nt = self.Record
        if nt is None:
            nt = self.Record = self._make_nt()
        return list(map(nt._make, ts))

    def fetchall(self):
        ts = super().fetchall()
        nt = self.Record
        if nt is None:
            nt = self.Record = self._make_nt()
        return list(map(nt._make, ts))

    def __iter__(self):
        try:
            it = super().__iter__()
            # Fetch the first row eagerly so the Record class can be built
            # from the (now available) cursor description.
            t = next(it)

            nt = self.Record
            if nt is None:
                nt = self.Record = self._make_nt()

            yield nt._make(t)

            while True:
                yield nt._make(next(it))
        except StopIteration:
            return

    # ascii except alnum and underscore
    _re_clean = _re.compile(
        '[' + _re.escape(' !"#$%&\'()*+,-./:;<=>?@[\\]^`{|}~') + ']')

    def _make_nt(self):
        # The tuple of column names is the cache key; _cached_make_nt is
        # attached to the class below, wrapped in an lru_cache.
        key = tuple(d[0] for d in self.description) if self.description else ()
        return self._cached_make_nt(key)

    @classmethod
    def _do_make_nt(cls, key):
        fields = []
        for s in key:
            # Replace characters invalid in Python identifiers with '_'.
            s = cls._re_clean.sub('_', s)
            # Python identifier cannot start with numbers, namedtuple fields
            # cannot start with underscore. So...
            if s[0] == '_' or '0' <= s[0] <= '9':
                s = 'f' + s
            fields.append(s)

        nt = namedtuple("Record", fields)
        return nt
+
+
@lru_cache(512)
def _cached_make_nt(cls, key):
    # Build (or return a cached) namedtuple class for a tuple of column names.
    # NOTE(review): the cache size here (512) does not match
    # NamedTupleCursor.MAX_CACHE (1024) -- confirm which value is intended.
    return cls._do_make_nt(key)


# Exposed for testability, and if someone wants to monkeypatch to tweak
# the cache size.
NamedTupleCursor._cached_make_nt = classmethod(_cached_make_nt)
+
+
class LoggingConnection(_connection):
    """A connection that logs all queries to a file or logger__ object.

    .. __: https://docs.python.org/library/logging.html
    """

    def initialize(self, logobj):
        """Initialize the connection to log to `!logobj`.

        The `!logobj` parameter can be an open file object or a
        Logger/LoggerAdapter instance from the standard logging module.
        """
        self._logobj = logobj
        is_logger = _logging and isinstance(
            logobj, (_logging.Logger, _logging.LoggerAdapter))
        self.log = self._logtologger if is_logger else self._logtofile

    def filter(self, msg, curs):
        """Filter the query before logging it.

        This is the method to overwrite to filter unwanted queries out of the
        log or to add some extra data to the output. The default implementation
        just does nothing.
        """
        return msg

    def _logtofile(self, msg, curs):
        msg = self.filter(msg, curs)
        if not msg:
            return
        if isinstance(msg, bytes):
            # Decode with the connection's client encoding.
            msg = msg.decode(_ext.encodings[self.encoding], 'replace')
        self._logobj.write(msg + _os.linesep)

    def _logtologger(self, msg, curs):
        msg = self.filter(msg, curs)
        if msg:
            self._logobj.debug(msg)

    def _check(self):
        if not hasattr(self, '_logobj'):
            raise self.ProgrammingError(
                "LoggingConnection object has not been initialize()d")

    def cursor(self, *args, **kwargs):
        self._check()
        factory = self.cursor_factory or LoggingCursor
        kwargs.setdefault('cursor_factory', factory)
        return super().cursor(*args, **kwargs)
+
+
class LoggingCursor(_cursor):
    """A cursor that logs queries using its connection logging facilities."""

    def execute(self, query, vars=None):
        try:
            return super().execute(query, vars)
        finally:
            # Log in a finally block so failed statements are recorded too;
            # self.query holds the query as sent to the backend.
            self.connection.log(self.query, self)

    def callproc(self, procname, vars=None):
        try:
            return super().callproc(procname, vars)
        finally:
            self.connection.log(self.query, self)
+
+
class MinTimeLoggingConnection(LoggingConnection):
    """A connection that logs queries based on execution time.

    This is just an example of how to sub-class `LoggingConnection` to
    provide some extra filtering for the logged queries. Both the
    `initialize()` and `filter()` methods are overwritten to make sure
    that only queries executing for more than ``mintime`` ms are logged.

    Note that this connection uses the specialized cursor
    `MinTimeLoggingCursor`.
    """
    def initialize(self, logobj, mintime=0):
        LoggingConnection.initialize(self, logobj)
        self._mintime = mintime

    def filter(self, msg, curs):
        # Compute the elapsed time in milliseconds since the cursor started
        # the statement (timestamp is set by MinTimeLoggingCursor).
        elapsed_ms = (_time.time() - curs.timestamp) * 1000
        if elapsed_ms <= self._mintime:
            # Too fast to be interesting: suppress the log entry.
            return None
        if isinstance(msg, bytes):
            msg = msg.decode(_ext.encodings[self.encoding], 'replace')
        return f"{msg}{_os.linesep} (execution time: {elapsed_ms} ms)"

    def cursor(self, *args, **kwargs):
        factory = self.cursor_factory or MinTimeLoggingCursor
        kwargs.setdefault('cursor_factory', factory)
        return LoggingConnection.cursor(self, *args, **kwargs)
+
+
class MinTimeLoggingCursor(LoggingCursor):
    """The cursor sub-class companion to `MinTimeLoggingConnection`."""

    def execute(self, query, vars=None):
        # Record the start time: MinTimeLoggingConnection.filter() uses it
        # to compute the statement's execution time.
        self.timestamp = _time.time()
        return LoggingCursor.execute(self, query, vars)

    def callproc(self, procname, vars=None):
        self.timestamp = _time.time()
        return LoggingCursor.callproc(self, procname, vars)
+
+
class LogicalReplicationConnection(_replicationConnection):
    """A replication connection preset for logical replication."""

    def __init__(self, *args, **kwargs):
        # Force the replication type so callers don't have to pass it.
        kwargs['replication_type'] = REPLICATION_LOGICAL
        super().__init__(*args, **kwargs)
+
+
class PhysicalReplicationConnection(_replicationConnection):
    """A replication connection preset for physical replication."""

    def __init__(self, *args, **kwargs):
        # Force the replication type so callers don't have to pass it.
        kwargs['replication_type'] = REPLICATION_PHYSICAL
        super().__init__(*args, **kwargs)
+
+
class StopReplication(Exception):
    """
    Exception used to break out of the endless loop in
    `~ReplicationCursor.consume_stream()`.

    Subclass of `~exceptions.Exception`. Intentionally *not* inherited from
    `~psycopg2.Error` as occurrence of this exception does not indicate an
    error.
    """
    # Purely a control-flow signal; carries no state of its own.
    pass
+
+
class ReplicationCursor(_replicationCursor):
    """A cursor used for communication on replication connections."""

    def create_replication_slot(self, slot_name, slot_type=None, output_plugin=None):
        """Create streaming replication slot."""

        # Identifiers are quoted with quote_ident(); the command string is
        # sent verbatim to the walsender.
        command = f"CREATE_REPLICATION_SLOT {quote_ident(slot_name, self)} "

        # Default to the type the connection was opened with.
        if slot_type is None:
            slot_type = self.connection.replication_type

        if slot_type == REPLICATION_LOGICAL:
            if output_plugin is None:
                raise psycopg2.ProgrammingError(
                    "output plugin name is required to create "
                    "logical replication slot")

            command += f"LOGICAL {quote_ident(output_plugin, self)}"

        elif slot_type == REPLICATION_PHYSICAL:
            if output_plugin is not None:
                raise psycopg2.ProgrammingError(
                    "cannot specify output plugin name when creating "
                    "physical replication slot")

            command += "PHYSICAL"

        else:
            raise psycopg2.ProgrammingError(
                f"unrecognized replication type: {repr(slot_type)}")

        self.execute(command)

    def drop_replication_slot(self, slot_name):
        """Drop streaming replication slot."""

        command = f"DROP_REPLICATION_SLOT {quote_ident(slot_name, self)}"
        self.execute(command)

    def start_replication(
            self, slot_name=None, slot_type=None, start_lsn=0,
            timeline=0, options=None, decode=False, status_interval=10):
        """Start replication stream."""

        command = "START_REPLICATION "

        if slot_type is None:
            slot_type = self.connection.replication_type

        if slot_type == REPLICATION_LOGICAL:
            # A slot is mandatory for logical replication.
            if slot_name:
                command += f"SLOT {quote_ident(slot_name, self)} "
            else:
                raise psycopg2.ProgrammingError(
                    "slot name is required for logical replication")

            command += "LOGICAL "

        elif slot_type == REPLICATION_PHYSICAL:
            if slot_name:
                command += f"SLOT {quote_ident(slot_name, self)} "
            # don't add "PHYSICAL", before 9.4 it was just START_REPLICATION XXX/XXX

        else:
            raise psycopg2.ProgrammingError(
                f"unrecognized replication type: {repr(slot_type)}")

        # Render the LSN in the XXX/XXX form expected by the protocol:
        # either re-format a "hi/lo" hex string, or split an integer into
        # its high and low 32 bits (4294967295 == 0xFFFFFFFF).
        if type(start_lsn) is str:
            lsn = start_lsn.split('/')
            lsn = f"{int(lsn[0], 16):X}/{int(lsn[1], 16):08X}"
        else:
            lsn = f"{start_lsn >> 32 & 4294967295:X}/{start_lsn & 4294967295:08X}"

        command += lsn

        if timeline != 0:
            if slot_type == REPLICATION_LOGICAL:
                raise psycopg2.ProgrammingError(
                    "cannot specify timeline for logical replication")

            command += f" TIMELINE {timeline}"

        if options:
            if slot_type == REPLICATION_PHYSICAL:
                raise psycopg2.ProgrammingError(
                    "cannot specify output plugin options for physical replication")

            # Render the plugin options as a ("key" value, ...) list.
            command += " ("
            for k, v in options.items():
                if not command.endswith('('):
                    command += ", "
                command += f"{quote_ident(k, self)} {_A(str(v))}"
            command += ")"

        self.start_replication_expert(
            command, decode=decode, status_interval=status_interval)

    # allows replication cursors to be used in select.select() directly
    def fileno(self):
        return self.connection.fileno()
+
+
+# a dbtype and adapter for Python UUID type
+
class UUID_adapter:
    """Adapt Python's uuid.UUID__ type to PostgreSQL's uuid__.

    .. __: https://docs.python.org/library/uuid.html
    .. __: https://www.postgresql.org/docs/current/static/datatype-uuid.html
    """

    def __init__(self, uuid):
        self._uuid = uuid

    def __conform__(self, proto):
        # Only the ISQLQuote protocol is supported.
        if proto is _ext.ISQLQuote:
            return self

    def getquoted(self):
        # The quoted byte representation is just the str form, utf8-encoded.
        return str(self).encode('utf8')

    def __str__(self):
        return f"'{self._uuid}'::uuid"
+
+
def register_uuid(oids=None, conn_or_curs=None):
    """Create the UUID type and an uuid.UUID adapter.

    :param oids: oid for the PostgreSQL :sql:`uuid` type, or 2-items sequence
        with oids of the type and the array. If not specified, use PostgreSQL
        standard oids.
    :param conn_or_curs: where to register the typecaster. If not specified,
        register it globally.
    """

    import uuid

    # Resolve the oids of the base type and of the array type.
    if not oids:
        oid1, oid2 = 2950, 2951
    elif isinstance(oids, (list, tuple)):
        oid1, oid2 = oids
    else:
        oid1, oid2 = oids, 2951

    _ext.UUID = _ext.new_type(
        (oid1, ), "UUID",
        lambda data, cursor: data and uuid.UUID(data) or None)
    _ext.UUIDARRAY = _ext.new_array_type((oid2,), "UUID[]", _ext.UUID)

    _ext.register_type(_ext.UUID, conn_or_curs)
    _ext.register_type(_ext.UUIDARRAY, conn_or_curs)
    _ext.register_adapter(uuid.UUID, UUID_adapter)

    return _ext.UUID
+
+
+# a type, dbtype and adapter for PostgreSQL inet type
+
class Inet:
    """Wrap a string to allow for correct SQL-quoting of inet values.

    Note that this adapter does NOT check the passed value to make
    sure it really is an inet-compatible address but DOES call adapt()
    on it to make sure it is impossible to execute an SQL-injection
    by passing an evil value to the initializer.
    """
    def __init__(self, addr):
        self.addr = addr

    def __repr__(self):
        return f"{self.__class__.__name__}({self.addr!r})"

    def prepare(self, conn):
        self._conn = conn

    def getquoted(self):
        # Delegate quoting to the adapter of the wrapped value, then append
        # the cast to inet.
        wrapped = _A(self.addr)
        if hasattr(wrapped, 'prepare'):
            wrapped.prepare(self._conn)
        return wrapped.getquoted() + b"::inet"

    def __conform__(self, proto):
        if proto is _ext.ISQLQuote:
            return self

    def __str__(self):
        return str(self.addr)
+
+
def register_inet(oid=None, conn_or_curs=None):
    """Create the INET type and an Inet adapter.

    :param oid: oid for the PostgreSQL :sql:`inet` type, or 2-items sequence
        with oids of the type and the array. If not specified, use PostgreSQL
        standard oids.
    :param conn_or_curs: where to register the typecaster. If not specified,
        register it globally.
    """
    import warnings
    warnings.warn(
        "the inet adapter is deprecated, it's not very useful",
        DeprecationWarning)

    # Resolve the oids of the base type and of the array type.
    if not oid:
        oid1, oid2 = 869, 1041
    elif isinstance(oid, (list, tuple)):
        oid1, oid2 = oid
    else:
        oid1, oid2 = oid, 1041

    _ext.INET = _ext.new_type(
        (oid1, ), "INET",
        lambda data, cursor: data and Inet(data) or None)
    _ext.INETARRAY = _ext.new_array_type((oid2, ), "INETARRAY", _ext.INET)

    _ext.register_type(_ext.INET, conn_or_curs)
    _ext.register_type(_ext.INETARRAY, conn_or_curs)

    return _ext.INET
+
+
def wait_select(conn):
    """Wait until a connection or cursor has data available.

    The function is an example of a wait callback to be registered with
    `~psycopg2.extensions.set_wait_callback()`. This function uses
    :py:func:`~select.select()` to wait for data to become available, and
    therefore is able to handle/receive SIGINT/KeyboardInterrupt.
    """
    import select
    from psycopg2.extensions import POLL_OK, POLL_READ, POLL_WRITE

    while True:
        try:
            state = conn.poll()
            if state == POLL_OK:
                return
            if state == POLL_READ:
                select.select([conn.fileno()], [], [])
            elif state == POLL_WRITE:
                select.select([], [conn.fileno()], [])
            else:
                raise conn.OperationalError(f"bad state from poll: {state}")
        except KeyboardInterrupt:
            # Cancel the running command and keep polling:
            # the loop will be broken by a server error.
            conn.cancel()
            continue
+
+
def _solve_conn_curs(conn_or_curs):
    """Return the connection and a DBAPI cursor from a connection or cursor."""
    if conn_or_curs is None:
        raise psycopg2.ProgrammingError("no connection or cursor provided")

    # A cursor is recognized by its execute() method; either way we end up
    # with the underlying connection and a plain cursor opened on it.
    if hasattr(conn_or_curs, 'execute'):
        conn = conn_or_curs.connection
    else:
        conn = conn_or_curs

    curs = conn.cursor(cursor_factory=_cursor)
    return conn, curs
+
+
class HstoreAdapter:
    """Adapt a Python dict to the hstore syntax."""
    def __init__(self, wrapped):
        self.wrapped = wrapped

    def prepare(self, conn):
        self.conn = conn

        # use an old-style getquoted implementation if required
        if conn.info.server_version < 90000:
            self.getquoted = self._getquoted_8

    def _getquoted_8(self):
        """Use the operators available in PG pre-9.0."""
        if not self.wrapped:
            return b"''::hstore"

        adapt = _ext.adapt
        rv = []
        for k, v in self.wrapped.items():
            # Quote key and value through their own adapters; None maps
            # to an SQL NULL value.
            k = adapt(k)
            k.prepare(self.conn)
            k = k.getquoted()

            if v is not None:
                v = adapt(v)
                v.prepare(self.conn)
                v = v.getquoted()
            else:
                v = b'NULL'

            # XXX this b'ing is painfully inefficient!
            rv.append(b"(" + k + b" => " + v + b")")

        return b"(" + b'||'.join(rv) + b")"

    def _getquoted_9(self):
        """Use the hstore(text[], text[]) function."""
        if not self.wrapped:
            return b"''::hstore"

        # Build the hstore from two parallel arrays of keys and values.
        k = _ext.adapt(list(self.wrapped.keys()))
        k.prepare(self.conn)
        v = _ext.adapt(list(self.wrapped.values()))
        v.prepare(self.conn)
        return b"hstore(" + k.getquoted() + b", " + v.getquoted() + b")"

    # Default to the PG >= 9.0 implementation; prepare() may rebind it.
    getquoted = _getquoted_9

    _re_hstore = _re.compile(r"""
        # hstore key:
        # a string of normal or escaped chars
        "((?: [^"\\] | \\. )*)"
        \s*=>\s* # hstore value
        (?:
            NULL # the value can be null - not catched
            # or a quoted string like the key
            | "((?: [^"\\] | \\. )*)"
        )
        (?:\s*,\s*|$) # pairs separated by comma or end of string.
        """, _re.VERBOSE)

    @classmethod
    def parse(self, s, cur, _bsdec=_re.compile(r"\\(.)")):
        """Parse an hstore representation in a Python string.

        The hstore is represented as something like::

            "a"=>"1", "b"=>"2"

        with backslash-escaped strings.
        """
        if s is None:
            return None

        rv = {}
        start = 0
        # Note: this is a classmethod, so `self` is actually the class.
        # `start` tracks the end of the previous match: any gap means the
        # input contains something the regex could not parse.
        for m in self._re_hstore.finditer(s):
            if m is None or m.start() != start:
                raise psycopg2.InterfaceError(
                    f"error parsing hstore pair at char {start}")
            # Undo the backslash escaping in keys and non-NULL values.
            k = _bsdec.sub(r'\1', m.group(1))
            v = m.group(2)
            if v is not None:
                v = _bsdec.sub(r'\1', v)

            rv[k] = v
            start = m.end()

        if start < len(s):
            raise psycopg2.InterfaceError(
                f"error parsing hstore: unparsed data after char {start}")

        return rv

    @classmethod
    def parse_unicode(self, s, cur):
        """Parse an hstore returning unicode keys and values."""
        if s is None:
            return None

        # Decode the raw bytes with the connection's client encoding first.
        s = s.decode(_ext.encodings[cur.connection.encoding])
        return self.parse(s, cur)

    @classmethod
    def get_oids(self, conn_or_curs):
        """Return the lists of OID of the hstore and hstore[] types.
        """
        conn, curs = _solve_conn_curs(conn_or_curs)

        # Store the transaction status of the connection to revert it after use
        conn_status = conn.status

        # column typarray not available before PG 8.3
        typarray = conn.info.server_version >= 80300 and "typarray" or "NULL"

        rv0, rv1 = [], []

        # get the oid for the hstore
        curs.execute(f"""SELECT t.oid, {typarray}
FROM pg_type t JOIN pg_namespace ns
    ON typnamespace = ns.oid
WHERE typname = 'hstore';
""")
        for oids in curs:
            rv0.append(oids[0])
            rv1.append(oids[1])

        # revert the status of the connection as before the command
        if (conn_status != _ext.STATUS_IN_TRANSACTION
                and not conn.autocommit):
            conn.rollback()

        return tuple(rv0), tuple(rv1)
+
+
def register_hstore(conn_or_curs, globally=False, unicode=False,
                    oid=None, array_oid=None):
    r"""Register adapter and typecaster for `!dict`\-\ |hstore| conversions.

    :param conn_or_curs: a connection or cursor: the typecaster will be
        registered only on this object unless *globally* is set to `!True`
    :param globally: register the adapter globally, not only on *conn_or_curs*
    :param unicode: if `!True`, keys and values returned from the database
        will be `!unicode` instead of `!str`. The option is not available on
        Python 3
    :param oid: the OID of the |hstore| type if known. If not, it will be
        queried on *conn_or_curs*.
    :param array_oid: the OID of the |hstore| array type if known. If not, it
        will be queried on *conn_or_curs*.

    The connection or cursor passed to the function will be used to query the
    database and look for the OID of the |hstore| type (which may be different
    across databases). If querying is not desirable (e.g. with
    :ref:`asynchronous connections <async-support>`) you may specify it in the
    *oid* parameter, which can be found using a query such as :sql:`SELECT
    'hstore'::regtype::oid`. Analogously you can obtain a value for *array_oid*
    using a query such as :sql:`SELECT 'hstore[]'::regtype::oid`.

    Note that, when passing a dictionary from Python to the database, both
    strings and unicode keys and values are supported. Dictionaries returned
    from the database have keys/values according to the *unicode* parameter.

    The |hstore| contrib module must be already installed in the database
    (executing the ``hstore.sql`` script in your ``contrib`` directory).
    Raise `~psycopg2.ProgrammingError` if the type is not found.
    """
    # Look the oids up on the server when not provided by the caller.
    if oid is None:
        oid = HstoreAdapter.get_oids(conn_or_curs)
        if oid is None or not oid[0]:
            raise psycopg2.ProgrammingError(
                "hstore type not found in the database. "
                "please install it from your 'contrib/hstore.sql' file")
        array_oid = oid[1]
        oid = oid[0]

    # Normalize both oids to tuples, dropping falsy array entries.
    if isinstance(oid, int):
        oid = (oid,)

    if array_oid is not None:
        if isinstance(array_oid, int):
            array_oid = (array_oid,)
        else:
            array_oid = tuple(x for x in array_oid if x)

    # create and register the typecaster
    HSTORE = _ext.new_type(oid, "HSTORE", HstoreAdapter.parse)
    _ext.register_type(HSTORE, not globally and conn_or_curs or None)
    _ext.register_adapter(dict, HstoreAdapter)

    if array_oid:
        HSTOREARRAY = _ext.new_array_type(array_oid, "HSTOREARRAY", HSTORE)
        _ext.register_type(HSTOREARRAY, not globally and conn_or_curs or None)
+
+
class CompositeCaster:
    """Helps conversion of a PostgreSQL composite type into a Python object.

    The class is usually created by the `register_composite()` function.
    You may want to create and register manually instances of the class if
    querying the database at registration time is not desirable (such as when
    using an :ref:`asynchronous connections <async-support>`).

    """
    def __init__(self, name, oid, attrs, array_oid=None, schema=None):
        self.name = name
        self.schema = schema
        self.oid = oid
        self.array_oid = array_oid

        # *attrs* is a sequence of (attribute name, attribute type oid) pairs.
        self.attnames = [a[0] for a in attrs]
        self.atttypes = [a[1] for a in attrs]
        self._create_type(name, self.attnames)
        self.typecaster = _ext.new_type((oid,), name, self.parse)
        if array_oid:
            self.array_typecaster = _ext.new_array_type(
                (array_oid,), f"{name}ARRAY", self.typecaster)
        else:
            self.array_typecaster = None

    def parse(self, s, curs):
        """Parse the composite text representation into a Python object."""
        if s is None:
            return None

        tokens = self.tokenize(s)
        if len(tokens) != len(self.atttypes):
            raise psycopg2.DataError(
                "expecting %d components for the type %s, %d found instead" %
                (len(self.atttypes), self.name, len(tokens)))

        # Cast each token through the typecaster of the matching attribute.
        values = [curs.cast(oid, token)
            for oid, token in zip(self.atttypes, tokens)]

        return self.make(values)

    def make(self, values):
        """Return a new Python object representing the data being casted.

        *values* is the list of attributes, already casted into their Python
        representation.

        You can subclass this method to :ref:`customize the composite cast
        <custom-composite>`.
        """

        return self._ctor(values)

    _re_tokenize = _re.compile(r"""
  \(? ([,)])                        # an empty token, representing NULL
| \(? " ((?: [^"] | "")*) " [,)]    # or a quoted string
| \(? ([^",)]+) [,)]                # or an unquoted string
    """, _re.VERBOSE)

    _re_undouble = _re.compile(r'(["\\])\1')

    @classmethod
    def tokenize(self, s):
        # Note: this is a classmethod, so `self` is actually the class.
        # Split a composite literal like (1,"a b",) into its raw fields;
        # None represents a NULL component.
        rv = []
        for m in self._re_tokenize.finditer(s):
            if m is None:
                raise psycopg2.InterfaceError(f"can't parse type: {s!r}")
            if m.group(1) is not None:
                rv.append(None)
            elif m.group(2) is not None:
                # Quoted string: undo the doubled quote/backslash escaping.
                rv.append(self._re_undouble.sub(r"\1", m.group(2)))
            else:
                rv.append(m.group(3))

        return rv

    def _create_type(self, name, attnames):
        # The default Python representation is a namedtuple named after
        # the composite type, with one field per attribute.
        self.type = namedtuple(name, attnames)
        self._ctor = self.type._make

    @classmethod
    def _from_db(self, name, conn_or_curs):
        """Return a `CompositeCaster` instance for the type *name*.

        Raise `ProgrammingError` if the type is not found.
        """
        conn, curs = _solve_conn_curs(conn_or_curs)

        # Store the transaction status of the connection to revert it after use
        conn_status = conn.status

        # Use the correct schema
        if '.' in name:
            schema, tname = name.split('.', 1)
        else:
            tname = name
            schema = 'public'

        # column typarray not available before PG 8.3
        typarray = conn.info.server_version >= 80300 and "typarray" or "NULL"

        # get the type oid and attributes
        curs.execute("""\
SELECT t.oid, %s, attname, atttypid
FROM pg_type t
JOIN pg_namespace ns ON typnamespace = ns.oid
JOIN pg_attribute a ON attrelid = typrelid
WHERE typname = %%s AND nspname = %%s
    AND attnum > 0 AND NOT attisdropped
ORDER BY attnum;
""" % typarray, (tname, schema))

        recs = curs.fetchall()

        # revert the status of the connection as before the command
        if (conn_status != _ext.STATUS_IN_TRANSACTION
                and not conn.autocommit):
            conn.rollback()

        if not recs:
            raise psycopg2.ProgrammingError(
                f"PostgreSQL type '{name}' not found")

        type_oid = recs[0][0]
        array_oid = recs[0][1]
        type_attrs = [(r[2], r[3]) for r in recs]

        return self(tname, type_oid, type_attrs,
            array_oid=array_oid, schema=schema)
+
+
def register_composite(name, conn_or_curs, globally=False, factory=None):
    """Register a typecaster to convert a composite type into a tuple.

    :param name: the name of a PostgreSQL composite type, e.g. created using
        the |CREATE TYPE|_ command
    :param conn_or_curs: a connection or cursor used to find the type oid and
        components; the typecaster is registered in a scope limited to this
        object, unless *globally* is set to `!True`
    :param globally: if `!False` (default) register the typecaster only on
        *conn_or_curs*, otherwise register it globally
    :param factory: if specified it should be a `CompositeCaster` subclass: use
        it to :ref:`customize how to cast composite types <custom-composite>`
    :return: the registered `CompositeCaster` or *factory* instance
        responsible for the conversion
    """
    caster = (factory or CompositeCaster)._from_db(name, conn_or_curs)

    # Register in the requested scope: the given object, or globally.
    scope = not globally and conn_or_curs or None
    _ext.register_type(caster.typecaster, scope)
    if caster.array_typecaster is not None:
        _ext.register_type(caster.array_typecaster, scope)

    return caster
+
+
+def _paginate(seq, page_size):
+ """Consume an iterable and return it in chunks.
+
+    Every chunk contains at most `page_size` items. Never return an empty chunk.
+ """
+ page = []
+ it = iter(seq)
+ while True:
+ try:
+ for i in range(page_size):
+ page.append(next(it))
+ yield page
+ page = []
+ except StopIteration:
+ if page:
+ yield page
+ return
+
+
+def execute_batch(cur, sql, argslist, page_size=100):
+ r"""Execute groups of statements in fewer server roundtrips.
+
+ Execute *sql* several times, against all parameters set (sequences or
+ mappings) found in *argslist*.
+
+ The function is semantically similar to
+
+ .. parsed-literal::
+
+ *cur*\.\ `~cursor.executemany`\ (\ *sql*\ , *argslist*\ )
+
+ but has a different implementation: Psycopg will join the statements into
+ fewer multi-statement commands, each one containing at most *page_size*
+ statements, resulting in a reduced number of server roundtrips.
+
+ After the execution of the function the `cursor.rowcount` property will
+ **not** contain a total result.
+
+ """
+ for page in _paginate(argslist, page_size=page_size):
+ sqls = [cur.mogrify(sql, args) for args in page]
+ cur.execute(b";".join(sqls))
+
+
+def execute_values(cur, sql, argslist, template=None, page_size=100, fetch=False):
+ '''Execute a statement using :sql:`VALUES` with a sequence of parameters.
+
+ :param cur: the cursor to use to execute the query.
+
+ :param sql: the query to execute. It must contain a single ``%s``
+ placeholder, which will be replaced by a `VALUES list`__.
+ Example: ``"INSERT INTO mytable (id, f1, f2) VALUES %s"``.
+
+ :param argslist: sequence of sequences or dictionaries with the arguments
+ to send to the query. The type and content must be consistent with
+ *template*.
+
+ :param template: the snippet to merge to every item in *argslist* to
+ compose the query.
+
+ - If the *argslist* items are sequences it should contain positional
+      placeholders (e.g. ``"(%s, %s, %s)"``, or ``"(%s, %s, 42)"`` if there
+      are constant values...).
+
+ - If the *argslist* items are mappings it should contain named
+ placeholders (e.g. ``"(%(id)s, %(f1)s, 42)"``).
+
+        If not specified, assume the arguments are sequences and use a simple
+ positional template (i.e. ``(%s, %s, ...)``), with the number of
+ placeholders sniffed by the first element in *argslist*.
+
+ :param page_size: maximum number of *argslist* items to include in every
+ statement. If there are more items the function will execute more than
+ one statement.
+
+ :param fetch: if `!True` return the query results into a list (like in a
+ `~cursor.fetchall()`). Useful for queries with :sql:`RETURNING`
+ clause.
+
+ .. __: https://www.postgresql.org/docs/current/static/queries-values.html
+
+ After the execution of the function the `cursor.rowcount` property will
+ **not** contain a total result.
+
+ While :sql:`INSERT` is an obvious candidate for this function it is
+ possible to use it with other statements, for example::
+
+ >>> cur.execute(
+ ... "create table test (id int primary key, v1 int, v2 int)")
+
+ >>> execute_values(cur,
+ ... "INSERT INTO test (id, v1, v2) VALUES %s",
+ ... [(1, 2, 3), (4, 5, 6), (7, 8, 9)])
+
+ >>> execute_values(cur,
+ ... """UPDATE test SET v1 = data.v1 FROM (VALUES %s) AS data (id, v1)
+ ... WHERE test.id = data.id""",
+ ... [(1, 20), (4, 50)])
+
+ >>> cur.execute("select * from test order by id")
+ >>> cur.fetchall()
+        [(1, 20, 3), (4, 50, 6), (7, 8, 9)]
+
+ '''
+ from psycopg2.sql import Composable
+ if isinstance(sql, Composable):
+ sql = sql.as_string(cur)
+
+ # we can't just use sql % vals because vals is bytes: if sql is bytes
+ # there will be some decoding error because of stupid codec used, and Py3
+ # doesn't implement % on bytes.
+ if not isinstance(sql, bytes):
+ sql = sql.encode(_ext.encodings[cur.connection.encoding])
+ pre, post = _split_sql(sql)
+
+ result = [] if fetch else None
+ for page in _paginate(argslist, page_size=page_size):
+ if template is None:
+ template = b'(' + b','.join([b'%s'] * len(page[0])) + b')'
+ parts = pre[:]
+ for args in page:
+ parts.append(cur.mogrify(template, args))
+ parts.append(b',')
+ parts[-1:] = post
+ cur.execute(b''.join(parts))
+ if fetch:
+ result.extend(cur.fetchall())
+
+ return result
+
+
+def _split_sql(sql):
+ """Split *sql* on a single ``%s`` placeholder.
+
+ Split on the %s, perform %% replacement and return pre, post lists of
+ snippets.
+ """
+ curr = pre = []
+ post = []
+ tokens = _re.split(br'(%.)', sql)
+ for token in tokens:
+ if len(token) != 2 or token[:1] != b'%':
+ curr.append(token)
+ continue
+
+ if token[1:] == b's':
+ if curr is pre:
+ curr = post
+ else:
+ raise ValueError(
+ "the query contains more than one '%s' placeholder")
+ elif token[1:] == b'%':
+ curr.append(b'%')
+ else:
+ raise ValueError("unsupported format character: '%s'"
+ % token[1:].decode('ascii', 'replace'))
+
+ if curr is pre:
+ raise ValueError("the query doesn't contain any '%s' placeholder")
+
+ return pre, post
diff --git a/lib/pool.py b/lib/pool.py
new file mode 100644
index 0000000..9d67d68
--- /dev/null
+++ b/lib/pool.py
@@ -0,0 +1,187 @@
+"""Connection pooling for psycopg2
+
+This module implements thread-safe (and not) connection pools.
+"""
+# psycopg/pool.py - pooling code for psycopg
+#
+# Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+# Copyright (C) 2020-2021 The Psycopg Team
+#
+# psycopg2 is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link this program with the OpenSSL library (or with
+# modified versions of OpenSSL that use the same license as OpenSSL),
+# and distribute linked combinations including the two.
+#
+# You must obey the GNU Lesser General Public License in all respects for
+# all of the code used other than OpenSSL.
+#
+# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+import psycopg2
+from psycopg2 import extensions as _ext
+
+
+class PoolError(psycopg2.Error):
+ pass
+
+
+class AbstractConnectionPool:
+ """Generic key-based pooling code."""
+
+ def __init__(self, minconn, maxconn, *args, **kwargs):
+ """Initialize the connection pool.
+
+        New 'minconn' connections are created immediately by calling
+        psycopg2.connect() with the given parameters. The connection pool will
+        support a maximum of about 'maxconn' connections.
+ """
+ self.minconn = int(minconn)
+ self.maxconn = int(maxconn)
+ self.closed = False
+
+ self._args = args
+ self._kwargs = kwargs
+
+ self._pool = []
+ self._used = {}
+ self._rused = {} # id(conn) -> key map
+ self._keys = 0
+
+ for i in range(self.minconn):
+ self._connect()
+
+ def _connect(self, key=None):
+ """Create a new connection and assign it to 'key' if not None."""
+ conn = psycopg2.connect(*self._args, **self._kwargs)
+ if key is not None:
+ self._used[key] = conn
+ self._rused[id(conn)] = key
+ else:
+ self._pool.append(conn)
+ return conn
+
+ def _getkey(self):
+ """Return a new unique key."""
+ self._keys += 1
+ return self._keys
+
+ def _getconn(self, key=None):
+ """Get a free connection and assign it to 'key' if not None."""
+ if self.closed:
+ raise PoolError("connection pool is closed")
+ if key is None:
+ key = self._getkey()
+
+ if key in self._used:
+ return self._used[key]
+
+ if self._pool:
+ self._used[key] = conn = self._pool.pop()
+ self._rused[id(conn)] = key
+ return conn
+ else:
+ if len(self._used) == self.maxconn:
+ raise PoolError("connection pool exhausted")
+ return self._connect(key)
+
+ def _putconn(self, conn, key=None, close=False):
+ """Put away a connection."""
+ if self.closed:
+ raise PoolError("connection pool is closed")
+
+ if key is None:
+ key = self._rused.get(id(conn))
+ if key is None:
+ raise PoolError("trying to put unkeyed connection")
+
+ if len(self._pool) < self.minconn and not close:
+ # Return the connection into a consistent state before putting
+ # it back into the pool
+ if not conn.closed:
+ status = conn.info.transaction_status
+ if status == _ext.TRANSACTION_STATUS_UNKNOWN:
+ # server connection lost
+ conn.close()
+ elif status != _ext.TRANSACTION_STATUS_IDLE:
+ # connection in error or in transaction
+ conn.rollback()
+ self._pool.append(conn)
+ else:
+ # regular idle connection
+ self._pool.append(conn)
+ # If the connection is closed, we just discard it.
+ else:
+ conn.close()
+
+ # here we check for the presence of key because it can happen that a
+ # thread tries to put back a connection after a call to close
+ if not self.closed or key in self._used:
+ del self._used[key]
+ del self._rused[id(conn)]
+
+ def _closeall(self):
+ """Close all connections.
+
+        Note that this can cause some code to fail badly when trying to use
+ an already closed connection. If you call .closeall() make sure
+ your code can deal with it.
+ """
+ if self.closed:
+ raise PoolError("connection pool is closed")
+ for conn in self._pool + list(self._used.values()):
+ try:
+ conn.close()
+ except Exception:
+ pass
+ self.closed = True
+
+
+class SimpleConnectionPool(AbstractConnectionPool):
+ """A connection pool that can't be shared across different threads."""
+
+ getconn = AbstractConnectionPool._getconn
+ putconn = AbstractConnectionPool._putconn
+ closeall = AbstractConnectionPool._closeall
+
+
+class ThreadedConnectionPool(AbstractConnectionPool):
+ """A connection pool that works with the threading module."""
+
+ def __init__(self, minconn, maxconn, *args, **kwargs):
+ """Initialize the threading lock."""
+ import threading
+ AbstractConnectionPool.__init__(
+ self, minconn, maxconn, *args, **kwargs)
+ self._lock = threading.Lock()
+
+ def getconn(self, key=None):
+ """Get a free connection and assign it to 'key' if not None."""
+ self._lock.acquire()
+ try:
+ return self._getconn(key)
+ finally:
+ self._lock.release()
+
+ def putconn(self, conn=None, key=None, close=False):
+ """Put away an unused connection."""
+ self._lock.acquire()
+ try:
+ self._putconn(conn, key, close)
+ finally:
+ self._lock.release()
+
+ def closeall(self):
+        """Close all connections (even the ones currently in use)."""
+ self._lock.acquire()
+ try:
+ self._closeall()
+ finally:
+ self._lock.release()
diff --git a/lib/sql.py b/lib/sql.py
new file mode 100644
index 0000000..69b352b
--- /dev/null
+++ b/lib/sql.py
@@ -0,0 +1,455 @@
+"""SQL composition utility module
+"""
+
+# psycopg/sql.py - SQL composition utility module
+#
+# Copyright (C) 2016-2019 Daniele Varrazzo <daniele.varrazzo@gmail.com>
+# Copyright (C) 2020-2021 The Psycopg Team
+#
+# psycopg2 is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link this program with the OpenSSL library (or with
+# modified versions of OpenSSL that use the same license as OpenSSL),
+# and distribute linked combinations including the two.
+#
+# You must obey the GNU Lesser General Public License in all respects for
+# all of the code used other than OpenSSL.
+#
+# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+import string
+
+from psycopg2 import extensions as ext
+
+
+_formatter = string.Formatter()
+
+
+class Composable:
+ """
+ Abstract base class for objects that can be used to compose an SQL string.
+
+ `!Composable` objects can be passed directly to `~cursor.execute()`,
+ `~cursor.executemany()`, `~cursor.copy_expert()` in place of the query
+ string.
+
+ `!Composable` objects can be joined using the ``+`` operator: the result
+ will be a `Composed` instance containing the objects joined. The operator
+ ``*`` is also supported with an integer argument: the result is a
+ `!Composed` instance containing the left argument repeated as many times as
+ requested.
+ """
+ def __init__(self, wrapped):
+ self._wrapped = wrapped
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}({self._wrapped!r})"
+
+ def as_string(self, context):
+ """
+ Return the string value of the object.
+
+ :param context: the context to evaluate the string into.
+ :type context: `connection` or `cursor`
+
+ The method is automatically invoked by `~cursor.execute()`,
+ `~cursor.executemany()`, `~cursor.copy_expert()` if a `!Composable` is
+ passed instead of the query string.
+ """
+ raise NotImplementedError
+
+ def __add__(self, other):
+ if isinstance(other, Composed):
+ return Composed([self]) + other
+ if isinstance(other, Composable):
+ return Composed([self]) + Composed([other])
+ else:
+ return NotImplemented
+
+ def __mul__(self, n):
+ return Composed([self] * n)
+
+ def __eq__(self, other):
+ return type(self) is type(other) and self._wrapped == other._wrapped
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+
+class Composed(Composable):
+ """
+ A `Composable` object made of a sequence of `!Composable`.
+
+ The object is usually created using `!Composable` operators and methods.
+ However it is possible to create a `!Composed` directly specifying a
+ sequence of `!Composable` as arguments.
+
+ Example::
+
+ >>> comp = sql.Composed(
+ ... [sql.SQL("insert into "), sql.Identifier("table")])
+ >>> print(comp.as_string(conn))
+ insert into "table"
+
+ `!Composed` objects are iterable (so they can be used in `SQL.join` for
+ instance).
+ """
+ def __init__(self, seq):
+ wrapped = []
+ for i in seq:
+ if not isinstance(i, Composable):
+ raise TypeError(
+ f"Composed elements must be Composable, got {i!r} instead")
+ wrapped.append(i)
+
+ super().__init__(wrapped)
+
+ @property
+ def seq(self):
+ """The list of the content of the `!Composed`."""
+ return list(self._wrapped)
+
+ def as_string(self, context):
+ rv = []
+ for i in self._wrapped:
+ rv.append(i.as_string(context))
+ return ''.join(rv)
+
+ def __iter__(self):
+ return iter(self._wrapped)
+
+ def __add__(self, other):
+ if isinstance(other, Composed):
+ return Composed(self._wrapped + other._wrapped)
+ if isinstance(other, Composable):
+ return Composed(self._wrapped + [other])
+ else:
+ return NotImplemented
+
+ def join(self, joiner):
+ """
+ Return a new `!Composed` interposing the *joiner* with the `!Composed` items.
+
+ The *joiner* must be a `SQL` or a string which will be interpreted as
+ an `SQL`.
+
+ Example::
+
+ >>> fields = sql.Identifier('foo') + sql.Identifier('bar') # a Composed
+ >>> print(fields.join(', ').as_string(conn))
+ "foo", "bar"
+
+ """
+ if isinstance(joiner, str):
+ joiner = SQL(joiner)
+ elif not isinstance(joiner, SQL):
+ raise TypeError(
+ "Composed.join() argument must be a string or an SQL")
+
+ return joiner.join(self)
+
+
+class SQL(Composable):
+ """
+ A `Composable` representing a snippet of SQL statement.
+
+ `!SQL` exposes `join()` and `format()` methods useful to create a template
+ where to merge variable parts of a query (for instance field or table
+ names).
+
+ The *string* doesn't undergo any form of escaping, so it is not suitable to
+ represent variable identifiers or values: you should only use it to pass
+ constant strings representing templates or snippets of SQL statements; use
+ other objects such as `Identifier` or `Literal` to represent variable
+ parts.
+
+ Example::
+
+ >>> query = sql.SQL("select {0} from {1}").format(
+ ... sql.SQL(', ').join([sql.Identifier('foo'), sql.Identifier('bar')]),
+ ... sql.Identifier('table'))
+ >>> print(query.as_string(conn))
+ select "foo", "bar" from "table"
+ """
+ def __init__(self, string):
+ if not isinstance(string, str):
+ raise TypeError("SQL values must be strings")
+ super().__init__(string)
+
+ @property
+ def string(self):
+ """The string wrapped by the `!SQL` object."""
+ return self._wrapped
+
+ def as_string(self, context):
+ return self._wrapped
+
+ def format(self, *args, **kwargs):
+ """
+ Merge `Composable` objects into a template.
+
+ :param `Composable` args: parameters to replace to numbered
+ (``{0}``, ``{1}``) or auto-numbered (``{}``) placeholders
+ :param `Composable` kwargs: parameters to replace to named (``{name}``)
+ placeholders
+ :return: the union of the `!SQL` string with placeholders replaced
+ :rtype: `Composed`
+
+ The method is similar to the Python `str.format()` method: the string
+ template supports auto-numbered (``{}``), numbered (``{0}``,
+ ``{1}``...), and named placeholders (``{name}``), with positional
+ arguments replacing the numbered placeholders and keywords replacing
+ the named ones. However placeholder modifiers (``{0!r}``, ``{0:<10}``)
+ are not supported. Only `!Composable` objects can be passed to the
+ template.
+
+ Example::
+
+ >>> print(sql.SQL("select * from {} where {} = %s")
+ ... .format(sql.Identifier('people'), sql.Identifier('id'))
+ ... .as_string(conn))
+ select * from "people" where "id" = %s
+
+ >>> print(sql.SQL("select * from {tbl} where {pkey} = %s")
+ ... .format(tbl=sql.Identifier('people'), pkey=sql.Identifier('id'))
+ ... .as_string(conn))
+ select * from "people" where "id" = %s
+
+ """
+ rv = []
+ autonum = 0
+ for pre, name, spec, conv in _formatter.parse(self._wrapped):
+ if spec:
+ raise ValueError("no format specification supported by SQL")
+ if conv:
+ raise ValueError("no format conversion supported by SQL")
+ if pre:
+ rv.append(SQL(pre))
+
+ if name is None:
+ continue
+
+ if name.isdigit():
+ if autonum:
+ raise ValueError(
+ "cannot switch from automatic field numbering to manual")
+ rv.append(args[int(name)])
+ autonum = None
+
+ elif not name:
+ if autonum is None:
+ raise ValueError(
+ "cannot switch from manual field numbering to automatic")
+ rv.append(args[autonum])
+ autonum += 1
+
+ else:
+ rv.append(kwargs[name])
+
+ return Composed(rv)
+
+ def join(self, seq):
+ """
+ Join a sequence of `Composable`.
+
+ :param seq: the elements to join.
+ :type seq: iterable of `!Composable`
+
+ Use the `!SQL` object's *string* to separate the elements in *seq*.
+ Note that `Composed` objects are iterable too, so they can be used as
+ argument for this method.
+
+ Example::
+
+ >>> snip = sql.SQL(', ').join(
+ ... sql.Identifier(n) for n in ['foo', 'bar', 'baz'])
+ >>> print(snip.as_string(conn))
+ "foo", "bar", "baz"
+ """
+ rv = []
+ it = iter(seq)
+ try:
+ rv.append(next(it))
+ except StopIteration:
+ pass
+ else:
+ for i in it:
+ rv.append(self)
+ rv.append(i)
+
+ return Composed(rv)
+
+
+class Identifier(Composable):
+ """
+ A `Composable` representing an SQL identifier or a dot-separated sequence.
+
+ Identifiers usually represent names of database objects, such as tables or
+ fields. PostgreSQL identifiers follow `different rules`__ than SQL string
+ literals for escaping (e.g. they use double quotes instead of single).
+
+ .. __: https://www.postgresql.org/docs/current/static/sql-syntax-lexical.html# \
+ SQL-SYNTAX-IDENTIFIERS
+
+ Example::
+
+ >>> t1 = sql.Identifier("foo")
+ >>> t2 = sql.Identifier("ba'r")
+ >>> t3 = sql.Identifier('ba"z')
+ >>> print(sql.SQL(', ').join([t1, t2, t3]).as_string(conn))
+ "foo", "ba'r", "ba""z"
+
+ Multiple strings can be passed to the object to represent a qualified name,
+ i.e. a dot-separated sequence of identifiers.
+
+ Example::
+
+ >>> query = sql.SQL("select {} from {}").format(
+ ... sql.Identifier("table", "field"),
+ ... sql.Identifier("schema", "table"))
+ >>> print(query.as_string(conn))
+ select "table"."field" from "schema"."table"
+
+ """
+ def __init__(self, *strings):
+ if not strings:
+ raise TypeError("Identifier cannot be empty")
+
+ for s in strings:
+ if not isinstance(s, str):
+ raise TypeError("SQL identifier parts must be strings")
+
+ super().__init__(strings)
+
+ @property
+ def strings(self):
+ """A tuple with the strings wrapped by the `Identifier`."""
+ return self._wrapped
+
+ @property
+ def string(self):
+ """The string wrapped by the `Identifier`.
+ """
+ if len(self._wrapped) == 1:
+ return self._wrapped[0]
+ else:
+ raise AttributeError(
+                "the Identifier wraps more than one string")
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}({', '.join(map(repr, self._wrapped))})"
+
+ def as_string(self, context):
+ return '.'.join(ext.quote_ident(s, context) for s in self._wrapped)
+
+
+class Literal(Composable):
+ """
+ A `Composable` representing an SQL value to include in a query.
+
+ Usually you will want to include placeholders in the query and pass values
+ as `~cursor.execute()` arguments. If however you really really need to
+ include a literal value in the query you can use this object.
+
+ The string returned by `!as_string()` follows the normal :ref:`adaptation
+ rules <python-types-adaptation>` for Python objects.
+
+ Example::
+
+ >>> s1 = sql.Literal("foo")
+ >>> s2 = sql.Literal("ba'r")
+ >>> s3 = sql.Literal(42)
+ >>> print(sql.SQL(', ').join([s1, s2, s3]).as_string(conn))
+ 'foo', 'ba''r', 42
+
+ """
+ @property
+ def wrapped(self):
+ """The object wrapped by the `!Literal`."""
+ return self._wrapped
+
+ def as_string(self, context):
+ # is it a connection or cursor?
+ if isinstance(context, ext.connection):
+ conn = context
+ elif isinstance(context, ext.cursor):
+ conn = context.connection
+ else:
+ raise TypeError("context must be a connection or a cursor")
+
+ a = ext.adapt(self._wrapped)
+ if hasattr(a, 'prepare'):
+ a.prepare(conn)
+
+ rv = a.getquoted()
+ if isinstance(rv, bytes):
+ rv = rv.decode(ext.encodings[conn.encoding])
+
+ return rv
+
+
+class Placeholder(Composable):
+ """A `Composable` representing a placeholder for query parameters.
+
+ If the name is specified, generate a named placeholder (e.g. ``%(name)s``),
+ otherwise generate a positional placeholder (e.g. ``%s``).
+
+ The object is useful to generate SQL queries with a variable number of
+ arguments.
+
+ Examples::
+
+ >>> names = ['foo', 'bar', 'baz']
+
+ >>> q1 = sql.SQL("insert into table ({}) values ({})").format(
+ ... sql.SQL(', ').join(map(sql.Identifier, names)),
+ ... sql.SQL(', ').join(sql.Placeholder() * len(names)))
+ >>> print(q1.as_string(conn))
+ insert into table ("foo", "bar", "baz") values (%s, %s, %s)
+
+ >>> q2 = sql.SQL("insert into table ({}) values ({})").format(
+ ... sql.SQL(', ').join(map(sql.Identifier, names)),
+ ... sql.SQL(', ').join(map(sql.Placeholder, names)))
+ >>> print(q2.as_string(conn))
+ insert into table ("foo", "bar", "baz") values (%(foo)s, %(bar)s, %(baz)s)
+
+ """
+
+ def __init__(self, name=None):
+ if isinstance(name, str):
+ if ')' in name:
+ raise ValueError(f"invalid name: {name!r}")
+
+ elif name is not None:
+ raise TypeError(f"expected string or None as name, got {name!r}")
+
+ super().__init__(name)
+
+ @property
+ def name(self):
+ """The name of the `!Placeholder`."""
+ return self._wrapped
+
+ def __repr__(self):
+ if self._wrapped is None:
+ return f"{self.__class__.__name__}()"
+ else:
+ return f"{self.__class__.__name__}({self._wrapped!r})"
+
+ def as_string(self, context):
+ if self._wrapped is not None:
+ return f"%({self._wrapped})s"
+ else:
+ return "%s"
+
+
+# Literals
+NULL = SQL("NULL")
+DEFAULT = SQL("DEFAULT")
diff --git a/lib/tz.py b/lib/tz.py
new file mode 100644
index 0000000..d88ca37
--- /dev/null
+++ b/lib/tz.py
@@ -0,0 +1,158 @@
+"""tzinfo implementations for psycopg2
+
+This module holds two different tzinfo implementations that can be used as
+the 'tzinfo' argument to datetime constructors, directly passed to psycopg
+functions or used to set the .tzinfo_factory attribute in cursors.
+"""
+# psycopg/tz.py - tzinfo implementation
+#
+# Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+# Copyright (C) 2020-2021 The Psycopg Team
+#
+# psycopg2 is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# In addition, as a special exception, the copyright holders give
+# permission to link this program with the OpenSSL library (or with
+# modified versions of OpenSSL that use the same license as OpenSSL),
+# and distribute linked combinations including the two.
+#
+# You must obey the GNU Lesser General Public License in all respects for
+# all of the code used other than OpenSSL.
+#
+# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+import datetime
+import time
+
+ZERO = datetime.timedelta(0)
+
+
+class FixedOffsetTimezone(datetime.tzinfo):
+ """Fixed offset in minutes east from UTC.
+
+ This is exactly the implementation__ found in Python 2.3.x documentation,
+ with a small change to the `!__init__()` method to allow for pickling
+    and a default name in the form ``sHH:MM`` (``s`` is the sign).
+
+ The implementation also caches instances. During creation, if a
+ FixedOffsetTimezone instance has previously been created with the same
+ offset and name that instance will be returned. This saves memory and
+ improves comparability.
+
+ .. versionchanged:: 2.9
+
+ The constructor can take either a timedelta or a number of minutes of
+ offset. Previously only minutes were supported.
+
+ .. __: https://docs.python.org/library/datetime.html
+ """
+ _name = None
+ _offset = ZERO
+
+ _cache = {}
+
+ def __init__(self, offset=None, name=None):
+ if offset is not None:
+ if not isinstance(offset, datetime.timedelta):
+ offset = datetime.timedelta(minutes=offset)
+ self._offset = offset
+ if name is not None:
+ self._name = name
+
+ def __new__(cls, offset=None, name=None):
+ """Return a suitable instance created earlier if it exists
+ """
+ key = (offset, name)
+ try:
+ return cls._cache[key]
+ except KeyError:
+ tz = super().__new__(cls, offset, name)
+ cls._cache[key] = tz
+ return tz
+
+ def __repr__(self):
+ return "psycopg2.tz.FixedOffsetTimezone(offset=%r, name=%r)" \
+ % (self._offset, self._name)
+
+ def __eq__(self, other):
+ if isinstance(other, FixedOffsetTimezone):
+ return self._offset == other._offset
+ else:
+ return NotImplemented
+
+ def __ne__(self, other):
+ if isinstance(other, FixedOffsetTimezone):
+ return self._offset != other._offset
+ else:
+ return NotImplemented
+
+ def __getinitargs__(self):
+ return self._offset, self._name
+
+ def utcoffset(self, dt):
+ return self._offset
+
+ def tzname(self, dt):
+ if self._name is not None:
+ return self._name
+
+ minutes, seconds = divmod(self._offset.total_seconds(), 60)
+ hours, minutes = divmod(minutes, 60)
+ rv = "%+03d" % hours
+ if minutes or seconds:
+ rv += ":%02d" % minutes
+ if seconds:
+ rv += ":%02d" % seconds
+
+ return rv
+
+ def dst(self, dt):
+ return ZERO
+
+
+STDOFFSET = datetime.timedelta(seconds=-time.timezone)
+if time.daylight:
+ DSTOFFSET = datetime.timedelta(seconds=-time.altzone)
+else:
+ DSTOFFSET = STDOFFSET
+DSTDIFF = DSTOFFSET - STDOFFSET
+
+
+class LocalTimezone(datetime.tzinfo):
+ """Platform idea of local timezone.
+
+ This is the exact implementation from the Python 2.3 documentation.
+ """
+ def utcoffset(self, dt):
+ if self._isdst(dt):
+ return DSTOFFSET
+ else:
+ return STDOFFSET
+
+ def dst(self, dt):
+ if self._isdst(dt):
+ return DSTDIFF
+ else:
+ return ZERO
+
+ def tzname(self, dt):
+ return time.tzname[self._isdst(dt)]
+
+ def _isdst(self, dt):
+ tt = (dt.year, dt.month, dt.day,
+ dt.hour, dt.minute, dt.second,
+ dt.weekday(), 0, -1)
+ stamp = time.mktime(tt)
+ tt = time.localtime(stamp)
+ return tt.tm_isdst > 0
+
+
+LOCAL = LocalTimezone()
+
+# TODO: pre-generate some interesting time zones?
diff --git a/psycopg/_psycopg.vc9.amd64.manifest b/psycopg/_psycopg.vc9.amd64.manifest
new file mode 100644
index 0000000..e92d583
--- /dev/null
+++ b/psycopg/_psycopg.vc9.amd64.manifest
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+ <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+ <security>
+ <requestedPrivileges>
+ <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
+ </requestedPrivileges>
+ </security>
+ </trustInfo>
+ <dependency>
+ <dependentAssembly>
+ <assemblyIdentity type="win32" name="Microsoft.VC90.CRT" version="9.0.21022.8" processorArchitecture="amd64" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
+ </dependentAssembly>
+ </dependency>
+</assembly>
diff --git a/psycopg/_psycopg.vc9.x86.manifest b/psycopg/_psycopg.vc9.x86.manifest
new file mode 100644
index 0000000..9fc55da
--- /dev/null
+++ b/psycopg/_psycopg.vc9.x86.manifest
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+ <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+ <security>
+ <requestedPrivileges>
+ <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
+ </requestedPrivileges>
+ </security>
+ </trustInfo>
+ <dependency>
+ <dependentAssembly>
+ <assemblyIdentity type="win32" name="Microsoft.VC90.CRT" version="9.0.21022.8" processorArchitecture="x86" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
+ </dependentAssembly>
+ </dependency>
+</assembly>
diff --git a/psycopg/adapter_asis.c b/psycopg/adapter_asis.c
new file mode 100644
index 0000000..5c75786
--- /dev/null
+++ b/psycopg/adapter_asis.c
@@ -0,0 +1,195 @@
+/* adapter_asis.c - adapt types as they are
+ *
+ * Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#define PSYCOPG_MODULE
+#include "psycopg/psycopg.h"
+
+#include "psycopg/adapter_asis.h"
+#include "psycopg/microprotocols_proto.h"
+
+#include <string.h>
+
+
+/** the AsIs object **/
+
/* asis_getquoted - return the wrapped object rendered as an SQL literal.
 *
 * None is adapted to the shared SQL NULL literal (psyco_null); any other
 * object is converted with str() and the resulting unicode is encoded to
 * UTF-8 bytes.  Returns a new reference, or NULL with an exception set.
 * 'args' is unused (the method is registered METH_NOARGS).
 */
static PyObject *
asis_getquoted(asisObject *self, PyObject *args)
{
    PyObject *rv;
    if (self->wrapped == Py_None) {
        Py_INCREF(psyco_null);
        rv = psyco_null;
    }
    else {
        rv = PyObject_Str(self->wrapped);
        /* unicode to bytes; the str reference is handed off to tmp */
        if (rv) {
            PyObject *tmp = PyUnicode_AsUTF8String(rv);
            Py_DECREF(rv);
            rv = tmp;
        }
    }

    return rv;
}
+
+static PyObject *
+asis_str(asisObject *self)
+{
+ return psyco_ensure_text(asis_getquoted(self, NULL));
+}
+
+static PyObject *
+asis_conform(asisObject *self, PyObject *args)
+{
+ PyObject *res, *proto;
+
+ if (!PyArg_ParseTuple(args, "O", &proto)) return NULL;
+
+ if (proto == (PyObject*)&isqlquoteType)
+ res = (PyObject*)self;
+ else
+ res = Py_None;
+
+ Py_INCREF(res);
+ return res;
+}
+
+/** the AsIs object */
+
+/* object member list */
+
static struct PyMemberDef asisObject_members[] = {
    /* the wrapped object is exposed read-only as .adapted */
    {"adapted", T_OBJECT, offsetof(asisObject, wrapped), READONLY},
    {NULL}
};

/* object method table */

static PyMethodDef asisObject_methods[] = {
    {"getquoted", (PyCFunction)asis_getquoted, METH_NOARGS,
     "getquoted() -> wrapped object value as SQL-quoted string"},
    {"__conform__", (PyCFunction)asis_conform, METH_VARARGS, NULL},
    {NULL}  /* Sentinel */
};
+
+/* initialization and finalization methods */
+
/* asis_setup - initialization helper: store a new reference to the
 * adapted object.  Always returns 0 (cannot fail). */
static int
asis_setup(asisObject *self, PyObject *obj)
{
    Dprintf("asis_setup: init asis object at %p, refcnt = "
        FORMAT_CODE_PY_SSIZE_T,
        self, Py_REFCNT(self)
      );

    Py_INCREF(obj);
    self->wrapped = obj;

    Dprintf("asis_setup: good asis object at %p, refcnt = "
        FORMAT_CODE_PY_SSIZE_T,
        self, Py_REFCNT(self)
      );
    return 0;
}
+
/* asis_dealloc - drop the wrapped reference and free the wrapper. */
static void
asis_dealloc(PyObject* obj)
{
    asisObject *self = (asisObject *)obj;

    Py_CLEAR(self->wrapped);

    Dprintf("asis_dealloc: deleted asis object at %p, refcnt = "
        FORMAT_CODE_PY_SSIZE_T,
        obj, Py_REFCNT(obj)
      );

    Py_TYPE(obj)->tp_free(obj);
}
+
/* asis_init - tp_init: AsIs(obj).  Delegates to asis_setup. */
static int
asis_init(PyObject *obj, PyObject *args, PyObject *kwds)
{
    PyObject *o;

    if (!PyArg_ParseTuple(args, "O", &o))
        return -1;

    return asis_setup((asisObject *)obj, o);
}

/* asis_new - tp_new: plain allocation; all setup happens in tp_init. */
static PyObject *
asis_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    return type->tp_alloc(type, 0);
}
+
+
+/* object type */
+
#define asisType_doc \
"AsIs(str) -> new AsIs adapter object"

/* Slot table for psycopg2.extensions.AsIs.  Most slots are unused;
 * note the second slot after tp_dealloc is the legacy tp_print
 * (repurposed as tp_vectorcall_offset in newer CPython). */
PyTypeObject asisType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "psycopg2.extensions.AsIs",
    sizeof(asisObject), 0,
    asis_dealloc, /*tp_dealloc*/
    0,          /*tp_print*/
    0,          /*tp_getattr*/
    0,          /*tp_setattr*/
    0,          /*tp_compare*/
    0,          /*tp_repr*/
    0,          /*tp_as_number*/
    0,          /*tp_as_sequence*/
    0,          /*tp_as_mapping*/
    0,          /*tp_hash */
    0,          /*tp_call*/
    (reprfunc)asis_str, /*tp_str*/
    0,          /*tp_getattro*/
    0,          /*tp_setattro*/
    0,          /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/
    asisType_doc, /*tp_doc*/
    0,          /*tp_traverse*/
    0,          /*tp_clear*/
    0,          /*tp_richcompare*/
    0,          /*tp_weaklistoffset*/
    0,          /*tp_iter*/
    0,          /*tp_iternext*/
    asisObject_methods, /*tp_methods*/
    asisObject_members, /*tp_members*/
    0,          /*tp_getset*/
    0,          /*tp_base*/
    0,          /*tp_dict*/
    0,          /*tp_descr_get*/
    0,          /*tp_descr_set*/
    0,          /*tp_dictoffset*/
    asis_init, /*tp_init*/
    0, /*tp_alloc*/
    asis_new, /*tp_new*/
};
diff --git a/psycopg/adapter_asis.h b/psycopg/adapter_asis.h
new file mode 100644
index 0000000..b6c82b7
--- /dev/null
+++ b/psycopg/adapter_asis.h
@@ -0,0 +1,48 @@
+/* adapter_asis.h - definition for the psycopg AsIs type wrapper
+ *
+ * Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#ifndef PSYCOPG_ASIS_H
+#define PSYCOPG_ASIS_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern HIDDEN PyTypeObject asisType;

/* Adapter passing the wrapped object's str() through unquoted. */
typedef struct {
    PyObject_HEAD

    /* this is the real object we wrap */
    PyObject *wrapped;

} asisObject;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(PSYCOPG_ASIS_H) */
diff --git a/psycopg/adapter_binary.c b/psycopg/adapter_binary.c
new file mode 100644
index 0000000..d6b110c
--- /dev/null
+++ b/psycopg/adapter_binary.c
@@ -0,0 +1,281 @@
+/* adapter_binary.c - Binary objects
+ *
+ * Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#define PSYCOPG_MODULE
+#include "psycopg/psycopg.h"
+
+#include "psycopg/adapter_binary.h"
+#include "psycopg/microprotocols_proto.h"
+#include "psycopg/connection.h"
+
+#include <string.h>
+
+
+/** the quoting code */
+
+static unsigned char *
+binary_escape(unsigned char *from, size_t from_length,
+ size_t *to_length, PGconn *conn)
+{
+ if (conn)
+ return PQescapeByteaConn(conn, from, from_length, to_length);
+ else
+ return PQescapeBytea(from, from_length, to_length);
+}
+
+/* binary_quote - do the quote process on plain and unicode strings */
+
/* binary_quote - quote the wrapped buffer-like object as a bytea literal.
 *
 * Returns a new bytes object "'...'::bytea" (E-prefixed when the
 * connection requires escape quoting), psyco_null for a wrapped None,
 * or NULL with TypeError/MemoryError set.  Any object supporting the
 * buffer protocol (bytes, bytearray, memoryview, ...) is accepted.
 */
static PyObject *
binary_quote(binaryObject *self)
{
    char *to = NULL;
    const char *buffer = NULL;
    Py_ssize_t buffer_len;
    size_t len = 0;
    PyObject *rv = NULL;
    Py_buffer view;
    int got_view = 0;   /* set only after PyObject_GetBuffer succeeds */

    /* Allow Binary(None) to work */
    if (self->wrapped == Py_None) {
        Py_INCREF(psyco_null);
        rv = psyco_null;
        goto exit;
    }

    /* if we got a plain string or a buffer we escape it and save the buffer */
    if (PyObject_CheckBuffer(self->wrapped)) {
        if (0 > PyObject_GetBuffer(self->wrapped, &view, PyBUF_CONTIG_RO)) {
            goto exit;
        }
        got_view = 1;
        buffer = (const char *)(view.buf);
        buffer_len = view.len;
    }

    /* not a buffer: rv stays NULL and the TypeError is raised below */
    if (!buffer) {
        goto exit;
    }

    /* escape and build quoted buffer */

    to = (char *)binary_escape((unsigned char*)buffer, (size_t)buffer_len,
        &len, self->conn ? ((connectionObject*)self->conn)->pgconn : NULL);
    if (to == NULL) {
        PyErr_NoMemory();
        goto exit;
    }

    if (len > 0)
        rv = Bytes_FromFormat(
            (self->conn && ((connectionObject*)self->conn)->equote)
            ? "E'%s'::bytea" : "'%s'::bytea" , to);
    else
        rv = Bytes_FromString("''::bytea");

exit:
    if (to) { PQfreemem(to); }
    if (got_view) { PyBuffer_Release(&view); }

    /* if the wrapped object is not bytes or a buffer, this is an error */
    if (!rv && !PyErr_Occurred()) {
        PyErr_Format(PyExc_TypeError, "can't escape %s to binary",
            Py_TYPE(self->wrapped)->tp_name);
    }

    return rv;
}
+
+/* binary_str, binary_getquoted - return result of quoting */
+
/* binary_getquoted - return the quoted bytea literal, computing it on
 * first call and caching it in self->buffer.  Returns a new reference
 * (NULL on error; the failed result is not cached because Py_XINCREF
 * and the return both see the NULL). */
static PyObject *
binary_getquoted(binaryObject *self, PyObject *args)
{
    if (self->buffer == NULL) {
        self->buffer = binary_quote(self);
    }
    Py_XINCREF(self->buffer);
    return self->buffer;
}

/* binary_str - str() support: the quoted value decoded to text. */
static PyObject *
binary_str(binaryObject *self)
{
    return psyco_ensure_text(binary_getquoted(self, NULL));
}
+
+static PyObject *
+binary_prepare(binaryObject *self, PyObject *args)
+{
+ PyObject *conn;
+
+ if (!PyArg_ParseTuple(args, "O!", &connectionType, &conn))
+ return NULL;
+
+ Py_XDECREF(self->conn);
+ self->conn = conn;
+ Py_INCREF(self->conn);
+
+ Py_RETURN_NONE;
+}
+
+static PyObject *
+binary_conform(binaryObject *self, PyObject *args)
+{
+ PyObject *res, *proto;
+
+ if (!PyArg_ParseTuple(args, "O", &proto)) return NULL;
+
+ if (proto == (PyObject*)&isqlquoteType)
+ res = (PyObject*)self;
+ else
+ res = Py_None;
+
+ Py_INCREF(res);
+ return res;
+}
+
+/** the Binary object **/
+
+/* object member list */
+
static struct PyMemberDef binaryObject_members[] = {
    /* the wrapped object and the cached quoted form, both read-only */
    {"adapted", T_OBJECT, offsetof(binaryObject, wrapped), READONLY},
    {"buffer", T_OBJECT, offsetof(binaryObject, buffer), READONLY},
    {NULL}
};

/* object method table */

static PyMethodDef binaryObject_methods[] = {
    {"getquoted", (PyCFunction)binary_getquoted, METH_NOARGS,
     "getquoted() -> wrapped object value as SQL-quoted binary string"},
    {"prepare", (PyCFunction)binary_prepare, METH_VARARGS,
     "prepare(conn) -> prepare for binary encoding using conn"},
    {"__conform__", (PyCFunction)binary_conform, METH_VARARGS, NULL},
    {NULL}  /* Sentinel */
};
+
+/* initialization and finalization methods */
+
/* binary_setup - initialization helper: take a new reference to the
 * adapted object; the quoted cache and connection start unset.
 * Always returns 0 (cannot fail). */
static int
binary_setup(binaryObject *self, PyObject *str)
{
    Dprintf("binary_setup: init binary object at %p, refcnt = "
        FORMAT_CODE_PY_SSIZE_T,
        self, Py_REFCNT(self)
      );

    self->buffer = NULL;
    self->conn = NULL;
    Py_INCREF(str);
    self->wrapped = str;

    Dprintf("binary_setup: good binary object at %p, refcnt = "
        FORMAT_CODE_PY_SSIZE_T,
        self, Py_REFCNT(self));
    return 0;
}

/* binary_dealloc - release all held references and free the wrapper. */
static void
binary_dealloc(PyObject* obj)
{
    binaryObject *self = (binaryObject *)obj;

    Py_CLEAR(self->wrapped);
    Py_CLEAR(self->buffer);
    Py_CLEAR(self->conn);

    Dprintf("binary_dealloc: deleted binary object at %p, refcnt = "
        FORMAT_CODE_PY_SSIZE_T,
        obj, Py_REFCNT(obj)
      );

    Py_TYPE(obj)->tp_free(obj);
}

/* binary_init - tp_init: Binary(obj).  Delegates to binary_setup. */
static int
binary_init(PyObject *obj, PyObject *args, PyObject *kwds)
{
    PyObject *str;

    if (!PyArg_ParseTuple(args, "O", &str))
        return -1;

    return binary_setup((binaryObject *)obj, str);
}

/* binary_new - tp_new: plain allocation; all setup happens in tp_init. */
static PyObject *
binary_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    return type->tp_alloc(type, 0);
}
+
+
+/* object type */
+
#define binaryType_doc \
"Binary(buffer) -> new binary object"

/* Slot table for psycopg2.extensions.Binary. */
PyTypeObject binaryType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "psycopg2.extensions.Binary",
    sizeof(binaryObject), 0,
    binary_dealloc, /*tp_dealloc*/
    0,          /*tp_print*/
    0,          /*tp_getattr*/
    0,          /*tp_setattr*/
    0,          /*tp_compare*/
    0,          /*tp_repr*/
    0,          /*tp_as_number*/
    0,          /*tp_as_sequence*/
    0,          /*tp_as_mapping*/
    0,          /*tp_hash */
    0,          /*tp_call*/
    (reprfunc)binary_str, /*tp_str*/
    0,          /*tp_getattro*/
    0,          /*tp_setattro*/
    0,          /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/
    binaryType_doc, /*tp_doc*/
    0,          /*tp_traverse*/
    0,          /*tp_clear*/
    0,          /*tp_richcompare*/
    0,          /*tp_weaklistoffset*/
    0,          /*tp_iter*/
    0,          /*tp_iternext*/
    binaryObject_methods, /*tp_methods*/
    binaryObject_members, /*tp_members*/
    0,          /*tp_getset*/
    0,          /*tp_base*/
    0,          /*tp_dict*/
    0,          /*tp_descr_get*/
    0,          /*tp_descr_set*/
    0,          /*tp_dictoffset*/
    binary_init, /*tp_init*/
    0, /*tp_alloc*/
    binary_new, /*tp_new*/
};
diff --git a/psycopg/adapter_binary.h b/psycopg/adapter_binary.h
new file mode 100644
index 0000000..54f9fb5
--- /dev/null
+++ b/psycopg/adapter_binary.h
@@ -0,0 +1,48 @@
+/* adapter_binary.h - definition for the Binary type
+ *
+ * Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#ifndef PSYCOPG_BINARY_H
+#define PSYCOPG_BINARY_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern HIDDEN PyTypeObject binaryType;

/* Adapter quoting a buffer-like object as a bytea literal. */
typedef struct {
    PyObject_HEAD

    PyObject *wrapped;  /* the adapted buffer-like object */
    PyObject *buffer;   /* cached quoted form, built lazily */
    PyObject *conn;     /* connection set by prepare(), may be NULL */
} binaryObject;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(PSYCOPG_BINARY_H) */
diff --git a/psycopg/adapter_datetime.c b/psycopg/adapter_datetime.c
new file mode 100644
index 0000000..9df26ad
--- /dev/null
+++ b/psycopg/adapter_datetime.c
@@ -0,0 +1,515 @@
+/* adapter_datetime.c - python date/time objects
+ *
+ * Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#define PSYCOPG_MODULE
+#include "psycopg/psycopg.h"
+
+#include "psycopg/adapter_datetime.h"
+#include "psycopg/microprotocols_proto.h"
+
+#include <datetime.h>
+
+#include <time.h>
+#include <string.h>
+
+
/* adapter_datetime_init - import the CPython datetime C API.
 *
 * Must be called at module initialization before any PyDateTimeAPI use.
 * Returns 0 on success, -1 with ImportError set on failure. */
RAISES_NEG int
adapter_datetime_init(void)
{
    PyDateTime_IMPORT;

    if (!PyDateTimeAPI) {
        PyErr_SetString(PyExc_ImportError, "datetime initialization failed");
        return -1;
    }
    return 0;
}
+
+/* datetime_str, datetime_getquoted - return result of quoting */
+
+static PyObject *
+_pydatetime_string_date_time(pydatetimeObject *self)
+{
+ PyObject *rv = NULL;
+ PyObject *iso = NULL;
+ PyObject *tz;
+
+ /* Select the right PG type to cast into. */
+ char *fmt = NULL;
+ switch (self->type) {
+ case PSYCO_DATETIME_TIME:
+ tz = PyObject_GetAttrString(self->wrapped, "tzinfo");
+ if (!tz) { goto error; }
+ fmt = (tz == Py_None) ? "'%s'::time" : "'%s'::timetz";
+ Py_DECREF(tz);
+ break;
+ case PSYCO_DATETIME_DATE:
+ fmt = "'%s'::date";
+ break;
+ case PSYCO_DATETIME_TIMESTAMP:
+ tz = PyObject_GetAttrString(self->wrapped, "tzinfo");
+ if (!tz) { goto error; }
+ fmt = (tz == Py_None) ? "'%s'::timestamp" : "'%s'::timestamptz";
+ Py_DECREF(tz);
+ break;
+ }
+
+ if (!(iso = psyco_ensure_bytes(
+ PyObject_CallMethod(self->wrapped, "isoformat", NULL)))) {
+ goto error;
+ }
+
+ rv = Bytes_FromFormat(fmt, Bytes_AsString(iso));
+
+ Py_DECREF(iso);
+ return rv;
+
+error:
+ Py_XDECREF(iso);
+ return rv;
+}
+
/* _pydatetime_string_delta - quote a timedelta as an interval literal.
 *
 * The microseconds component is formatted manually as a fixed six-digit,
 * zero-padded string (digits written least-significant first into the
 * buffer), so "1.000030 seconds" doesn't collapse to "1.30 seconds".
 * NOTE(review): assumes the wrapped timedelta is normalized so that
 * microseconds is in 0..999999, as datetime.timedelta guarantees.
 */
static PyObject *
_pydatetime_string_delta(pydatetimeObject *self)
{
    PyDateTime_Delta *obj = (PyDateTime_Delta*)self->wrapped;

    char buffer[8];
    int i;
    int a = PyDateTime_DELTA_GET_MICROSECONDS(obj);

    for (i=0; i < 6 ; i++) {
        buffer[5-i] = '0' + (a % 10);
        a /= 10;
    }
    buffer[6] = '\0';

    return Bytes_FromFormat("'%d days %d.%s seconds'::interval",
        PyDateTime_DELTA_GET_DAYS(obj),
        PyDateTime_DELTA_GET_SECONDS(obj),
        buffer);
}
+
+static PyObject *
+pydatetime_getquoted(pydatetimeObject *self, PyObject *args)
+{
+ if (self->type <= PSYCO_DATETIME_TIMESTAMP) {
+ return _pydatetime_string_date_time(self);
+ }
+ else {
+ return _pydatetime_string_delta(self);
+ }
+}
+
/* pydatetime_str - str() support: the quoted value decoded to text. */
static PyObject *
pydatetime_str(pydatetimeObject *self)
{
    return psyco_ensure_text(pydatetime_getquoted(self, NULL));
}

/* pydatetime_conform - __conform__(proto): self for the ISQLQuote
 * protocol, None otherwise.  Always a new reference. */
static PyObject *
pydatetime_conform(pydatetimeObject *self, PyObject *args)
{
    PyObject *res, *proto;

    if (!PyArg_ParseTuple(args, "O", &proto)) return NULL;

    if (proto == (PyObject*)&isqlquoteType)
        res = (PyObject*)self;
    else
        res = Py_None;

    Py_INCREF(res);
    return res;
}

/** the DateTime wrapper object **/

/* object member list */

static struct PyMemberDef pydatetimeObject_members[] = {
    /* wrapped object and the PSYCO_DATETIME_* discriminator, read-only */
    {"adapted", T_OBJECT, offsetof(pydatetimeObject, wrapped), READONLY},
    {"type", T_INT, offsetof(pydatetimeObject, type), READONLY},
    {NULL}
};

/* object method table */

static PyMethodDef pydatetimeObject_methods[] = {
    {"getquoted", (PyCFunction)pydatetime_getquoted, METH_NOARGS,
     "getquoted() -> wrapped object value as SQL date/time"},
    {"__conform__", (PyCFunction)pydatetime_conform, METH_VARARGS, NULL},
    {NULL}  /* Sentinel */
};
+
+/* initialization and finalization methods */
+
/* pydatetime_setup - initialization helper: store the PSYCO_DATETIME_*
 * discriminator and a new reference to the wrapped object.
 * Always returns 0 (type is not validated here). */
static int
pydatetime_setup(pydatetimeObject *self, PyObject *obj, int type)
{
    Dprintf("pydatetime_setup: init datetime object at %p, refcnt = "
        FORMAT_CODE_PY_SSIZE_T,
        self, Py_REFCNT(self));

    self->type = type;
    Py_INCREF(obj);
    self->wrapped = obj;

    Dprintf("pydatetime_setup: good pydatetime object at %p, refcnt = "
        FORMAT_CODE_PY_SSIZE_T,
        self, Py_REFCNT(self));
    return 0;
}
+
+static void
+pydatetime_dealloc(PyObject* obj)
+{
+ pydatetimeObject *self = (pydatetimeObject *)obj;
+
+ Py_CLEAR(self->wrapped);
+
+ Dprintf("mpydatetime_dealloc: deleted pydatetime object at %p, "
+ "refcnt = " FORMAT_CODE_PY_SSIZE_T, obj, Py_REFCNT(obj));
+
+ Py_TYPE(obj)->tp_free(obj);
+}
+
/* pydatetime_init - tp_init: datetime(obj[, type]).
 * NOTE(review): type defaults to -1 when omitted; pydatetime_setup does
 * not validate it, so the error only surfaces at quoting time. */
static int
pydatetime_init(PyObject *obj, PyObject *args, PyObject *kwds)
{
    PyObject *dt;
    int type = -1; /* raise an error if type was not passed! */

    if (!PyArg_ParseTuple(args, "O|i", &dt, &type))
        return -1;

    return pydatetime_setup((pydatetimeObject *)obj, dt, type);
}

/* pydatetime_new - tp_new: plain allocation; setup happens in tp_init. */
static PyObject *
pydatetime_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    return type->tp_alloc(type, 0);
}
+
+
+/* object type */
+
#define pydatetimeType_doc \
"datetime(datetime, type) -> new datetime wrapper object"

/* Slot table for psycopg2._psycopg.datetime (internal wrapper type). */
PyTypeObject pydatetimeType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "psycopg2._psycopg.datetime",
    sizeof(pydatetimeObject), 0,
    pydatetime_dealloc, /*tp_dealloc*/
    0,          /*tp_print*/
    0,          /*tp_getattr*/
    0,          /*tp_setattr*/
    0,          /*tp_compare*/
    0,          /*tp_repr*/
    0,          /*tp_as_number*/
    0,          /*tp_as_sequence*/
    0,          /*tp_as_mapping*/
    0,          /*tp_hash */
    0,          /*tp_call*/
    (reprfunc)pydatetime_str, /*tp_str*/
    0,          /*tp_getattro*/
    0,          /*tp_setattro*/
    0,          /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/
    pydatetimeType_doc, /*tp_doc*/
    0,          /*tp_traverse*/
    0,          /*tp_clear*/
    0,          /*tp_richcompare*/
    0,          /*tp_weaklistoffset*/
    0,          /*tp_iter*/
    0,          /*tp_iternext*/
    pydatetimeObject_methods, /*tp_methods*/
    pydatetimeObject_members, /*tp_members*/
    0,          /*tp_getset*/
    0,          /*tp_base*/
    0,          /*tp_dict*/
    0,          /*tp_descr_get*/
    0,          /*tp_descr_set*/
    0,          /*tp_dictoffset*/
    pydatetime_init, /*tp_init*/
    0, /*tp_alloc*/
    pydatetime_new, /*tp_new*/
};
+
+
+/** module-level functions **/
+
/* psyco_Date - DBAPI Date(year, month, day) constructor.
 *
 * Builds a datetime.date and wraps it in a pydatetime adapter of type
 * PSYCO_DATETIME_DATE.  Returns a new reference or NULL on error. */
PyObject *
psyco_Date(PyObject *self, PyObject *args)
{
    PyObject *res = NULL;
    int year, month, day;

    PyObject* obj = NULL;

    if (!PyArg_ParseTuple(args, "iii", &year, &month, &day))
        return NULL;

    obj = PyObject_CallFunction((PyObject*)PyDateTimeAPI->DateType,
        "iii", year, month, day);

    if (obj) {
        res = PyObject_CallFunction((PyObject *)&pydatetimeType,
            "Oi", obj, PSYCO_DATETIME_DATE);
        Py_DECREF(obj);
    }

    return res;
}

/* psyco_Time - DBAPI Time(hour, minutes, seconds[, tzinfo]) constructor.
 *
 * seconds is a float: the fractional part becomes the microseconds of
 * the datetime.time (rounded to the nearest integer). */
PyObject *
psyco_Time(PyObject *self, PyObject *args)
{
    PyObject *res = NULL;
    PyObject *tzinfo = NULL;
    int hours, minutes=0;
    double micro, second=0.0;

    PyObject* obj = NULL;

    if (!PyArg_ParseTuple(args, "iid|O", &hours, &minutes, &second,
                          &tzinfo))
        return NULL;

    /* split float seconds into integer seconds + microseconds */
    micro = (second - floor(second)) * 1000000.0;
    second = floor(second);

    if (tzinfo == NULL)
        obj = PyObject_CallFunction((PyObject*)PyDateTimeAPI->TimeType, "iiii",
            hours, minutes, (int)second, (int)round(micro));
    else
        obj = PyObject_CallFunction((PyObject*)PyDateTimeAPI->TimeType, "iiiiO",
            hours, minutes, (int)second, (int)round(micro), tzinfo);

    if (obj) {
        res = PyObject_CallFunction((PyObject *)&pydatetimeType,
            "Oi", obj, PSYCO_DATETIME_TIME);
        Py_DECREF(obj);
    }

    return res;
}
+
/* _psyco_Timestamp - shared builder for Timestamp-like constructors.
 *
 * Builds a datetime.datetime (naive when tzinfo is NULL) wrapped in a
 * pydatetime adapter of type PSYCO_DATETIME_TIMESTAMP.  The float
 * seconds argument is split into integer seconds + rounded microseconds.
 * Returns a new reference or NULL on error. */
static PyObject *
_psyco_Timestamp(int year, int month, int day,
                 int hour, int minute, double second, PyObject *tzinfo)
{
    double micro;
    PyObject *obj;
    PyObject *res = NULL;

    micro = (second - floor(second)) * 1000000.0;
    second = floor(second);

    if (tzinfo == NULL)
        obj = PyObject_CallFunction((PyObject*)PyDateTimeAPI->DateTimeType,
            "iiiiiii",
            year, month, day, hour, minute, (int)second,
            (int)round(micro));
    else
        obj = PyObject_CallFunction((PyObject*)PyDateTimeAPI->DateTimeType,
            "iiiiiiiO",
            year, month, day, hour, minute, (int)second,
            (int)round(micro), tzinfo);

    if (obj) {
        res = PyObject_CallFunction((PyObject *)&pydatetimeType,
            "Oi", obj, PSYCO_DATETIME_TIMESTAMP);
        Py_DECREF(obj);
    }

    return res;
}

/* psyco_Timestamp - DBAPI Timestamp(year, month, day[, hour, minutes,
 * seconds, tzinfo]) constructor; time-of-day defaults to midnight. */
PyObject *
psyco_Timestamp(PyObject *self, PyObject *args)
{
    PyObject *tzinfo = NULL;
    int year, month, day;
    int hour=0, minute=0; /* default to midnight */
    double second=0.0;

    if (!PyArg_ParseTuple(args, "iii|iidO", &year, &month, &day,
                          &hour, &minute, &second, &tzinfo))
        return NULL;

    return _psyco_Timestamp(year, month, day, hour, minute, second, tzinfo);
}
+
/* psyco_DateFromTicks - DBAPI DateFromTicks(ticks) constructor.
 *
 * Converts seconds-since-epoch to a local calendar date (via
 * localtime_r) and delegates to psyco_Date with a freshly built args
 * tuple.  Returns a new reference or NULL on error. */
PyObject *
psyco_DateFromTicks(PyObject *self, PyObject *args)
{
    PyObject *res = NULL;
    struct tm tm;
    time_t t;
    double ticks;

    if (!PyArg_ParseTuple(args, "d", &ticks))
        return NULL;

    t = (time_t)floor(ticks);
    if (localtime_r(&t, &tm)) {
        /* tm_year is years since 1900, tm_mon is 0-based */
        args = Py_BuildValue("iii", tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday);
        if (args) {
            res = psyco_Date(self, args);
            Py_DECREF(args);
        }
    }
    else {
        PyErr_SetString(InterfaceError, "failed localtime call");
    }

    return res;
}

/* psyco_TimeFromTicks - DBAPI TimeFromTicks(ticks) constructor.
 *
 * The sub-second part of ticks is preserved by adding it back to the
 * integer seconds from the broken-down local time. */
PyObject *
psyco_TimeFromTicks(PyObject *self, PyObject *args)
{
    PyObject *res = NULL;
    struct tm tm;
    time_t t;
    double ticks;

    if (!PyArg_ParseTuple(args,"d", &ticks))
        return NULL;

    t = (time_t)floor(ticks);
    ticks -= (double)t;   /* keep the fractional seconds */
    if (localtime_r(&t, &tm)) {
        args = Py_BuildValue("iid", tm.tm_hour, tm.tm_min,
            (double)tm.tm_sec + ticks);
        if (args) {
            res = psyco_Time(self, args);
            Py_DECREF(args);
        }
    }
    else {
        PyErr_SetString(InterfaceError, "failed localtime call");
    }

    return res;
}
+
/* psyco_TimestampFromTicks - DBAPI TimestampFromTicks(ticks).
 *
 * Builds a naive local datetime wrapper from the broken-down local
 * time, then replaces the wrapped object with the tz-aware result of
 * datetime.astimezone().  Ownership is handed to 'res' only when every
 * step succeeded; the exit block releases whatever is still held. */
PyObject *
psyco_TimestampFromTicks(PyObject *self, PyObject *args)
{
    pydatetimeObject *wrapper = NULL;
    PyObject *dt_aware = NULL;
    PyObject *res = NULL;
    struct tm tm;
    time_t t;
    double ticks;

    if (!PyArg_ParseTuple(args, "d", &ticks))
        return NULL;

    t = (time_t)floor(ticks);
    ticks -= (double)t;   /* keep the fractional seconds */
    if (!localtime_r(&t, &tm)) {
        PyErr_SetString(InterfaceError, "failed localtime call");
        goto exit;
    }

    /* Convert the tm to a wrapper containing a naive datetime.datetime */
    if (!(wrapper = (pydatetimeObject *)_psyco_Timestamp(
            tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
            tm.tm_hour, tm.tm_min, (double)tm.tm_sec + ticks, NULL))) {
        goto exit;
    }

    /* Localize the datetime and assign it back to the wrapper */
    if (!(dt_aware = PyObject_CallMethod(
            wrapper->wrapped, "astimezone", NULL))) {
        goto exit;
    }
    Py_CLEAR(wrapper->wrapped);
    wrapper->wrapped = dt_aware;   /* reference stolen from dt_aware */
    dt_aware = NULL;

    /* the wrapper is ready to be returned */
    res = (PyObject *)wrapper;
    wrapper = NULL;

exit:
    Py_XDECREF(dt_aware);
    Py_XDECREF(wrapper);
    return res;
}
+
/* psyco_DateFromPy - wrap an existing datetime.date (type-checked by
 * PyArg_ParseTuple "O!") in a pydatetime adapter. */
PyObject *
psyco_DateFromPy(PyObject *self, PyObject *args)
{
    PyObject *obj;

    if (!PyArg_ParseTuple(args, "O!", PyDateTimeAPI->DateType, &obj))
        return NULL;

    return PyObject_CallFunction((PyObject *)&pydatetimeType, "Oi", obj,
        PSYCO_DATETIME_DATE);
}

/* psyco_TimeFromPy - wrap an existing datetime.time. */
PyObject *
psyco_TimeFromPy(PyObject *self, PyObject *args)
{
    PyObject *obj;

    if (!PyArg_ParseTuple(args, "O!", PyDateTimeAPI->TimeType, &obj))
        return NULL;

    return PyObject_CallFunction((PyObject *)&pydatetimeType, "Oi", obj,
        PSYCO_DATETIME_TIME);
}

/* psyco_TimestampFromPy - wrap an existing datetime.datetime. */
PyObject *
psyco_TimestampFromPy(PyObject *self, PyObject *args)
{
    PyObject *obj;

    if (!PyArg_ParseTuple(args, "O!", PyDateTimeAPI->DateTimeType, &obj))
        return NULL;

    return PyObject_CallFunction((PyObject *)&pydatetimeType, "Oi", obj,
        PSYCO_DATETIME_TIMESTAMP);
}

/* psyco_IntervalFromPy - wrap an existing datetime.timedelta. */
PyObject *
psyco_IntervalFromPy(PyObject *self, PyObject *args)
{
    PyObject *obj;

    if (!PyArg_ParseTuple(args, "O!", PyDateTimeAPI->DeltaType, &obj))
        return NULL;

    return PyObject_CallFunction((PyObject *)&pydatetimeType, "Oi", obj,
        PSYCO_DATETIME_INTERVAL);
}
diff --git a/psycopg/adapter_datetime.h b/psycopg/adapter_datetime.h
new file mode 100644
index 0000000..7705db3
--- /dev/null
+++ b/psycopg/adapter_datetime.h
@@ -0,0 +1,107 @@
+/* adapter_datetime.h - definition for the python date/time types
+ *
+ * Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#ifndef PSYCOPG_DATETIME_H
+#define PSYCOPG_DATETIME_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
extern HIDDEN PyTypeObject pydatetimeType;

/* Adapter quoting date/time/datetime/timedelta objects; 'type'
 * discriminates which PostgreSQL type the value is cast to. */
typedef struct {
    PyObject_HEAD

    PyObject *wrapped;
    int type;
#define PSYCO_DATETIME_TIME 0
#define PSYCO_DATETIME_DATE 1
#define PSYCO_DATETIME_TIMESTAMP 2
#define PSYCO_DATETIME_INTERVAL 3

} pydatetimeObject;
+
+
+RAISES_NEG HIDDEN int adapter_datetime_init(void);
+
+HIDDEN PyObject *psyco_Date(PyObject *module, PyObject *args);
+#define psyco_Date_doc \
+ "Date(year, month, day) -> new date\n\n" \
+ "Build an object holding a date value."
+
+HIDDEN PyObject *psyco_Time(PyObject *module, PyObject *args);
+#define psyco_Time_doc \
+ "Time(hour, minutes, seconds, tzinfo=None) -> new time\n\n" \
+ "Build an object holding a time value."
+
+HIDDEN PyObject *psyco_Timestamp(PyObject *module, PyObject *args);
+#define psyco_Timestamp_doc \
+ "Timestamp(year, month, day, hour, minutes, seconds, tzinfo=None) -> new timestamp\n\n" \
+ "Build an object holding a timestamp value."
+
+HIDDEN PyObject *psyco_DateFromTicks(PyObject *module, PyObject *args);
+#define psyco_DateFromTicks_doc \
+ "DateFromTicks(ticks) -> new date\n\n" \
+ "Build an object holding a date value from the given ticks value.\n\n" \
+ "Ticks are the number of seconds since the epoch; see the documentation " \
+ "of the standard Python time module for details)."
+
+HIDDEN PyObject *psyco_TimeFromTicks(PyObject *module, PyObject *args);
+#define psyco_TimeFromTicks_doc \
+ "TimeFromTicks(ticks) -> new time\n\n" \
+ "Build an object holding a time value from the given ticks value.\n\n" \
+ "Ticks are the number of seconds since the epoch; see the documentation " \
+ "of the standard Python time module for details)."
+
+HIDDEN PyObject *psyco_TimestampFromTicks(PyObject *module, PyObject *args);
+#define psyco_TimestampFromTicks_doc \
+ "TimestampFromTicks(ticks) -> new timestamp\n\n" \
+ "Build an object holding a timestamp value from the given ticks value.\n\n" \
+ "Ticks are the number of seconds since the epoch; see the documentation " \
+ "of the standard Python time module for details)."
+
+HIDDEN PyObject *psyco_DateFromPy(PyObject *module, PyObject *args);
+#define psyco_DateFromPy_doc \
+ "DateFromPy(datetime.date) -> new wrapper"
+
+HIDDEN PyObject *psyco_TimeFromPy(PyObject *module, PyObject *args);
+#define psyco_TimeFromPy_doc \
+ "TimeFromPy(datetime.time) -> new wrapper"
+
+HIDDEN PyObject *psyco_TimestampFromPy(PyObject *module, PyObject *args);
+#define psyco_TimestampFromPy_doc \
+ "TimestampFromPy(datetime.datetime) -> new wrapper"
+
+HIDDEN PyObject *psyco_IntervalFromPy(PyObject *module, PyObject *args);
+#define psyco_IntervalFromPy_doc \
+ "IntervalFromPy(datetime.timedelta) -> new wrapper"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(PSYCOPG_DATETIME_H) */
diff --git a/psycopg/adapter_list.c b/psycopg/adapter_list.c
new file mode 100644
index 0000000..e22292b
--- /dev/null
+++ b/psycopg/adapter_list.c
@@ -0,0 +1,342 @@
+/* adapter_list.c - python list objects
+ *
+ * Copyright (C) 2004-2019 Federico Di Gregorio <fog@debian.org>
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#define PSYCOPG_MODULE
+#include "psycopg/psycopg.h"
+
+#include "psycopg/adapter_list.h"
+#include "psycopg/microprotocols.h"
+#include "psycopg/microprotocols_proto.h"
+
+
+/* list_str, list_getquoted - return result of quoting */
+
+/* Quote a Python list as a PostgreSQL array literal.
+ *
+ * Return a new bytes object, either "ARRAY[elem,...]" or — when every
+ * element is NULL (or a nested array of NULLs) — the "'{...}'" literal
+ * form, because the ARRAY[] construct cannot express those values.
+ * Return NULL with a Python exception set on error. */
+static PyObject *
+list_quote(listObject *self)
+{
+ /* adapt the list by calling adapt() recursively and then wrapping
+ everything into "ARRAY[]" */
+ PyObject *res = NULL;
+ /* qs holds one quoted bytes object per list element */
+ PyObject **qs = NULL;
+ Py_ssize_t bufsize = 0;
+ char *buf = NULL, *ptr;
+
+ /* list consisting of only NULL don't work with the ARRAY[] construct
+ * so we use the {NULL,...} syntax. The same syntax is also necessary
+ * to convert array of arrays containing only nulls. */
+ int all_nulls = 1;
+
+ Py_ssize_t i, len;
+
+ len = PyList_GET_SIZE(self->wrapped);
+
+ /* empty arrays are converted to NULLs (still searching for a way to
+ insert an empty array in postgresql) */
+ if (len == 0) {
+ /* it cannot be ARRAY[] because it would make empty lists unusable
+ * in any() without a cast. But we may convert it into ARRAY[] below */
+ res = Bytes_FromString("'{}'");
+ goto exit;
+ }
+
+ /* zero the slot array so the cleanup path below can Py_XDECREF every
+ * entry even when we bail out half-way through the loop */
+ if (!(qs = PyMem_New(PyObject *, len))) {
+ PyErr_NoMemory();
+ goto exit;
+ }
+ memset(qs, 0, len * sizeof(PyObject *));
+
+ for (i = 0; i < len; i++) {
+ PyObject *wrapped = PyList_GET_ITEM(self->wrapped, i);
+ if (wrapped == Py_None) {
+ Py_INCREF(psyco_null);
+ qs[i] = psyco_null;
+ }
+ else {
+ if (!(qs[i] = microprotocol_getquoted(
+ wrapped, (connectionObject*)self->connection))) {
+ goto exit;
+ }
+
+ /* Lists of arrays containing only nulls are also not supported
+ * by the ARRAY construct so we should do some special casing */
+ if (PyList_Check(wrapped)) {
+ /* a quoted sub-list starting with 'A' is an "ARRAY[...]"
+ * literal, so its contents are not all NULLs */
+ if (Bytes_AS_STRING(qs[i])[0] == 'A') {
+ all_nulls = 0;
+ }
+ else if (0 == strcmp(Bytes_AS_STRING(qs[i]), "'{}'")) {
+ /* case of issue #788: '{{}}' is not supported but
+ * array[array[]] is */
+ all_nulls = 0;
+ Py_CLEAR(qs[i]);
+ if (!(qs[i] = Bytes_FromString("ARRAY[]"))) {
+ goto exit;
+ }
+ }
+ }
+ else {
+ all_nulls = 0;
+ }
+ }
+ bufsize += Bytes_GET_SIZE(qs[i]) + 1; /* this, and a comma */
+ }
+
+ /* Create an array literal, usually ARRAY[...] but if the contents are
+ * all NULL or array of NULL we must use the '{...}' syntax
+ */
+ /* the extra 8 bytes leave room for the surrounding decoration:
+ * "ARRAY[" + "]" in one branch, "'{" + "}'" in the other */
+ if (!(ptr = buf = PyMem_Malloc(bufsize + 8))) {
+ PyErr_NoMemory();
+ goto exit;
+ }
+
+ if (!all_nulls) {
+ strcpy(ptr, "ARRAY[");
+ ptr += 6;
+ for (i = 0; i < len; i++) {
+ Py_ssize_t sl;
+ sl = Bytes_GET_SIZE(qs[i]);
+ memcpy(ptr, Bytes_AS_STRING(qs[i]), sl);
+ ptr += sl;
+ *ptr++ = ',';
+ }
+ /* overwrite the trailing comma with the closing bracket */
+ *(ptr - 1) = ']';
+ }
+ else {
+ *ptr++ = '\'';
+ *ptr++ = '{';
+ for (i = 0; i < len; i++) {
+ /* in case all the adapted things are nulls (or array of nulls),
+ * the quoted string is either NULL or an array of the form
+ * '{NULL,...}', in which case we have to strip the extra quotes */
+ char *s;
+ Py_ssize_t sl;
+ s = Bytes_AS_STRING(qs[i]);
+ sl = Bytes_GET_SIZE(qs[i]);
+ if (s[0] != '\'') {
+ memcpy(ptr, s, sl);
+ ptr += sl;
+ }
+ else {
+ memcpy(ptr, s + 1, sl - 2);
+ ptr += sl - 2;
+ }
+ *ptr++ = ',';
+ }
+ /* overwrite the trailing comma with the closing brace */
+ *(ptr - 1) = '}';
+ *ptr++ = '\'';
+ }
+
+ res = Bytes_FromStringAndSize(buf, ptr - buf);
+
+exit:
+ if (qs) {
+ for (i = 0; i < len; i++) {
+ PyObject *q = qs[i];
+ Py_XDECREF(q);
+ }
+ PyMem_Free(qs);
+ }
+ PyMem_Free(buf);
+
+ return res;
+}
+
+/* tp_str implementation: return the quoted form as a text object. */
+static PyObject *
+list_str(listObject *self)
+{
+ return psyco_ensure_text(list_quote(self));
+}
+
+/* getquoted() method: return the SQL representation as a bytes object. */
+static PyObject *
+list_getquoted(listObject *self, PyObject *args)
+{
+ return list_quote(self);
+}
+
+/* prepare(conn) method: store the connection later used when quoting
+ * the list elements. Rejects anything that is not a connection. */
+static PyObject *
+list_prepare(listObject *self, PyObject *args)
+{
+ PyObject *conn;
+
+ if (!PyArg_ParseTuple(args, "O!", &connectionType, &conn))
+ return NULL;
+
+ /* drop any previously stored connection before taking the new one */
+ Py_CLEAR(self->connection);
+ Py_INCREF(conn);
+ self->connection = conn;
+
+ Py_RETURN_NONE;
+}
+
+/* __conform__(proto): return self when asked for the ISQLQuote
+ * protocol, None for any other protocol. */
+static PyObject *
+list_conform(listObject *self, PyObject *args)
+{
+ PyObject *res, *proto;
+
+ if (!PyArg_ParseTuple(args, "O", &proto)) return NULL;
+
+ if (proto == (PyObject*)&isqlquoteType)
+ res = (PyObject*)self;
+ else
+ res = Py_None;
+
+ Py_INCREF(res);
+ return res;
+}
+
+/** the List wrapper object **/
+
+/* object member list */
+
+static struct PyMemberDef listObject_members[] = {
+ /* expose the wrapped Python list as the read-only "adapted" attribute */
+ {"adapted", T_OBJECT, offsetof(listObject, wrapped), READONLY},
+ {NULL}
+};
+
+/* object method table */
+
+static PyMethodDef listObject_methods[] = {
+ /* NOTE(review): the docstrings below look copy-pasted from the
+ * datetime adapter ("SQL date/time", "encoding") and do not describe
+ * this adapter's behavior; left untouched as they are runtime strings
+ * — verify against upstream before changing. */
+ {"getquoted", (PyCFunction)list_getquoted, METH_NOARGS,
+ "getquoted() -> wrapped object value as SQL date/time"},
+ {"prepare", (PyCFunction)list_prepare, METH_VARARGS,
+ "prepare(conn) -> set encoding to conn->encoding"},
+ {"__conform__", (PyCFunction)list_conform, METH_VARARGS, NULL},
+ {NULL} /* Sentinel */
+};
+
+/* initialization and finalization methods */
+
+/* Initialize a listObject around *obj*. Return 0 on success, -1 when
+ * obj is not a Python list (note: no exception is set in that case). */
+static int
+list_setup(listObject *self, PyObject *obj)
+{
+ Dprintf("list_setup: init list object at %p, refcnt = "
+ FORMAT_CODE_PY_SSIZE_T,
+ self, Py_REFCNT(self)
+ );
+
+ if (!PyList_Check(obj))
+ return -1;
+
+ /* no connection yet: one may be supplied later through prepare() */
+ self->connection = NULL;
+ Py_INCREF(obj);
+ self->wrapped = obj;
+
+ Dprintf("list_setup: good list object at %p, refcnt = "
+ FORMAT_CODE_PY_SSIZE_T,
+ self, Py_REFCNT(self)
+ );
+ return 0;
+}
+
+/* GC traversal: visit the wrapped list and the stored connection. */
+static int
+list_traverse(listObject *self, visitproc visit, void *arg)
+{
+ Py_VISIT(self->wrapped);
+ Py_VISIT(self->connection);
+ return 0;
+}
+
+/* GC clear: release the references held by this object. */
+static int
+list_clear(listObject *self)
+{
+ Py_CLEAR(self->wrapped);
+ Py_CLEAR(self->connection);
+ return 0;
+}
+
+static void
+list_dealloc(listObject* self)
+{
+ /* untrack from the cyclic GC before tearing down the references */
+ PyObject_GC_UnTrack((PyObject *)self);
+ list_clear(self);
+
+ Dprintf("list_dealloc: deleted list object at %p, "
+ "refcnt = " FORMAT_CODE_PY_SSIZE_T, self, Py_REFCNT(self));
+
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+/* tp_init implementation: List(list) — wrap the given Python list. */
+static int
+list_init(PyObject *obj, PyObject *args, PyObject *kwds)
+{
+ PyObject *l;
+
+ if (!PyArg_ParseTuple(args, "O", &l))
+ return -1;
+
+ return list_setup((listObject *)obj, l);
+}
+
+/* tp_new implementation: bare allocation; setup happens in tp_init. */
+static PyObject *
+list_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ return type->tp_alloc(type, 0);
+}
+
+
+/* object type */
+
+#define listType_doc \
+"List(list) -> new list wrapper object"
+
+/* Type object exposed as psycopg2._psycopg.List; GC-enabled and
+ * subclassable, with cycle support via list_traverse/list_clear. */
+PyTypeObject listType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "psycopg2._psycopg.List",
+ sizeof(listObject), 0,
+ (destructor)list_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash */
+ 0, /*tp_call*/
+ (reprfunc)list_str, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ listType_doc, /*tp_doc*/
+ (traverseproc)list_traverse, /*tp_traverse*/
+ (inquiry)list_clear, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ listObject_methods, /*tp_methods*/
+ listObject_members, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ list_init, /*tp_init*/
+ 0, /*tp_alloc*/
+ list_new, /*tp_new*/
+};
diff --git a/psycopg/adapter_list.h b/psycopg/adapter_list.h
new file mode 100644
index 0000000..2e00b53
--- /dev/null
+++ b/psycopg/adapter_list.h
@@ -0,0 +1,47 @@
+/* adapter_list.h - definition for the python list types
+ *
+ * Copyright (C) 2004-2019 Federico Di Gregorio <fog@debian.org>
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#ifndef PSYCOPG_LIST_H
+#define PSYCOPG_LIST_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern HIDDEN PyTypeObject listType;
+
+/* Instance layout for the List adapter. */
+typedef struct {
+ PyObject_HEAD
+
+ /* the Python list being adapted */
+ PyObject *wrapped;
+ /* connection used for quoting, set by prepare(); may be NULL */
+ PyObject *connection;
+} listObject;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(PSYCOPG_LIST_H) */
diff --git a/psycopg/adapter_pboolean.c b/psycopg/adapter_pboolean.c
new file mode 100644
index 0000000..6a28119
--- /dev/null
+++ b/psycopg/adapter_pboolean.c
@@ -0,0 +1,185 @@
+/* adapter_pboolean.c - psycopg boolean type wrapper implementation
+ *
+ * Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#define PSYCOPG_MODULE
+#include "psycopg/psycopg.h"
+
+#include "psycopg/adapter_pboolean.h"
+#include "psycopg/microprotocols_proto.h"
+
+#include <string.h>
+
+
+/** the Boolean object **/
+
+/* getquoted(): return b"true" or b"false" according to the truth
+ * value of the wrapped object.
+ * NOTE(review): PyObject_IsTrue() can return -1 on error; that case
+ * falls into the "true" branch here with the exception left pending
+ * — confirm against upstream whether this is intended. */
+static PyObject *
+pboolean_getquoted(pbooleanObject *self, PyObject *args)
+{
+ if (PyObject_IsTrue(self->wrapped)) {
+ return Bytes_FromString("true");
+ }
+ else {
+ return Bytes_FromString("false");
+ }
+}
+
+/* tp_str implementation: return the quoted form as a text object. */
+static PyObject *
+pboolean_str(pbooleanObject *self)
+{
+ return psyco_ensure_text(pboolean_getquoted(self, NULL));
+}
+
+/* __conform__(proto): return self when asked for the ISQLQuote
+ * protocol, None for any other protocol. */
+static PyObject *
+pboolean_conform(pbooleanObject *self, PyObject *args)
+{
+ PyObject *res, *proto;
+
+ if (!PyArg_ParseTuple(args, "O", &proto)) return NULL;
+
+ if (proto == (PyObject*)&isqlquoteType)
+ res = (PyObject*)self;
+ else
+ res = Py_None;
+
+ Py_INCREF(res);
+ return res;
+}
+
+/** the Boolean object: member and method tables **/
+
+/* object member list */
+
+static struct PyMemberDef pbooleanObject_members[] = {
+ /* expose the wrapped object as the read-only "adapted" attribute */
+ {"adapted", T_OBJECT, offsetof(pbooleanObject, wrapped), READONLY},
+ {NULL}
+};
+
+/* object method table */
+
+/* methods exposed on Boolean instances */
+static PyMethodDef pbooleanObject_methods[] = {
+ {"getquoted", (PyCFunction)pboolean_getquoted, METH_NOARGS,
+ "getquoted() -> wrapped object value as SQL-quoted string"},
+ {"__conform__", (PyCFunction)pboolean_conform, METH_VARARGS, NULL},
+ {NULL} /* Sentinel */
+};
+
+/* initialization and finalization methods */
+
+/* Store a new reference to *obj* as the wrapped value; always returns 0
+ * (any Python object is accepted — truthiness is evaluated at quoting
+ * time, not here). */
+static int
+pboolean_setup(pbooleanObject *self, PyObject *obj)
+{
+ Dprintf("pboolean_setup: init pboolean object at %p, refcnt = "
+ FORMAT_CODE_PY_SSIZE_T,
+ self, Py_REFCNT(self)
+ );
+
+ Py_INCREF(obj);
+ self->wrapped = obj;
+
+ Dprintf("pboolean_setup: good pboolean object at %p, refcnt = "
+ FORMAT_CODE_PY_SSIZE_T,
+ self, Py_REFCNT(self)
+ );
+ return 0;
+}
+
+/* tp_dealloc: drop the wrapped reference and free the instance
+ * (this type does not participate in cyclic GC, so no untracking). */
+static void
+pboolean_dealloc(PyObject* obj)
+{
+ pbooleanObject *self = (pbooleanObject *)obj;
+
+ Py_CLEAR(self->wrapped);
+
+ Dprintf("pboolean_dealloc: deleted pboolean object at %p, refcnt = "
+ FORMAT_CODE_PY_SSIZE_T,
+ obj, Py_REFCNT(obj)
+ );
+
+ Py_TYPE(obj)->tp_free(obj);
+}
+
+/* tp_init implementation: Boolean(obj) — wrap the given object. */
+static int
+pboolean_init(PyObject *obj, PyObject *args, PyObject *kwds)
+{
+ PyObject *o;
+
+ if (!PyArg_ParseTuple(args, "O", &o))
+ return -1;
+
+ return pboolean_setup((pbooleanObject *)obj, o);
+}
+
+/* tp_new implementation: bare allocation; setup happens in tp_init. */
+static PyObject *
+pboolean_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ return type->tp_alloc(type, 0);
+}
+
+
+/* object type */
+
+#define pbooleanType_doc \
+"Boolean(str) -> new Boolean adapter object"
+
+/* Type object exposed as psycopg2.extensions.Boolean; subclassable,
+ * no cyclic-GC support (the instance holds a single plain reference). */
+PyTypeObject pbooleanType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "psycopg2.extensions.Boolean",
+ sizeof(pbooleanObject), 0,
+ pboolean_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash */
+ 0, /*tp_call*/
+ (reprfunc)pboolean_str, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/
+ pbooleanType_doc, /*tp_doc*/
+ 0, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ pbooleanObject_methods, /*tp_methods*/
+ pbooleanObject_members, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ pboolean_init, /*tp_init*/
+ 0, /*tp_alloc*/
+ pboolean_new, /*tp_new*/
+};
diff --git a/psycopg/adapter_pboolean.h b/psycopg/adapter_pboolean.h
new file mode 100644
index 0000000..562fedc
--- /dev/null
+++ b/psycopg/adapter_pboolean.h
@@ -0,0 +1,48 @@
+/* adapter_pboolean.h - definition for the psycopg boolean type wrapper
+ *
+ * Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#ifndef PSYCOPG_PBOOLEAN_H
+#define PSYCOPG_PBOOLEAN_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern HIDDEN PyTypeObject pbooleanType;
+
+/* Instance layout for the Boolean adapter. */
+typedef struct {
+ PyObject_HEAD
+
+ /* this is the real object we wrap */
+ PyObject *wrapped;
+
+} pbooleanObject;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(PSYCOPG_PBOOLEAN_H) */
diff --git a/psycopg/adapter_pdecimal.c b/psycopg/adapter_pdecimal.c
new file mode 100644
index 0000000..25a7212
--- /dev/null
+++ b/psycopg/adapter_pdecimal.c
@@ -0,0 +1,248 @@
+/* adapter_pdecimal.c - psycopg Decimal type wrapper implementation
+ *
+ * Copyright (C) 2003-2019 Federico Di Gregorio <fog@debian.org>
+ * Copyright (C) 2020-2021 The Psycopg Team
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.