diff --git a/server/www/packages/packages-common/cryptography/__about__.py b/server/www/packages/packages-common/cryptography/__about__.py
new file mode 100644
index 0000000..a99f58f
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/__about__.py
@@ -0,0 +1,23 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+__all__ = [
+    "__title__", "__summary__", "__uri__", "__version__", "__author__",
+    "__email__", "__license__", "__copyright__",
+]
+
+__title__ = "cryptography"
+__summary__ = ("cryptography is a package which provides cryptographic recipes"
+               " and primitives to Python developers.")
+__uri__ = "https://github.com/pyca/cryptography"
+
+__version__ = "2.3.1"
+
+__author__ = "The cryptography developers"
+__email__ = "cryptography-dev@python.org"
+
+__license__ = "BSD or Apache License, Version 2.0"
+__copyright__ = "Copyright 2013-2017 {0}".format(__author__)
diff --git a/server/www/packages/packages-common/cryptography/__init__.py b/server/www/packages/packages-common/cryptography/__init__.py
new file mode 100644
index 0000000..6da0b38
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/__init__.py
@@ -0,0 +1,16 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+from cryptography.__about__ import (
+    __author__, __copyright__, __email__, __license__, __summary__, __title__,
+    __uri__, __version__
+)
+
+
+__all__ = [
+    "__title__", "__summary__", "__uri__", "__version__", "__author__",
+    "__email__", "__license__", "__copyright__",
+]
diff --git a/server/www/packages/packages-common/cryptography/exceptions.py b/server/www/packages/packages-common/cryptography/exceptions.py
new file mode 100644
index 0000000..648cf9d
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/exceptions.py
@@ -0,0 +1,57 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
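+#
+# Usage sketch (illustrative; not part of the upstream file): the exceptions
+# defined below are the package-wide error types, typically caught when
+# probing whether a backend supports a primitive. Here `some_backend` and
+# `some_hash` are hypothetical placeholders:
+#
+#     from cryptography.exceptions import UnsupportedAlgorithm
+#     try:
+#         ctx = some_backend.create_hash_ctx(some_hash)
+#     except UnsupportedAlgorithm:
+#         ctx = None  # fall back or report the missing primitive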
+
+from __future__ import absolute_import, division, print_function
+
+from enum import Enum
+
+
+class _Reasons(Enum):
+    BACKEND_MISSING_INTERFACE = 0
+    UNSUPPORTED_HASH = 1
+    UNSUPPORTED_CIPHER = 2
+    UNSUPPORTED_PADDING = 3
+    UNSUPPORTED_MGF = 4
+    UNSUPPORTED_PUBLIC_KEY_ALGORITHM = 5
+    UNSUPPORTED_ELLIPTIC_CURVE = 6
+    UNSUPPORTED_SERIALIZATION = 7
+    UNSUPPORTED_X509 = 8
+    UNSUPPORTED_EXCHANGE_ALGORITHM = 9
+    UNSUPPORTED_DIFFIE_HELLMAN = 10
+
+
+class UnsupportedAlgorithm(Exception):
+    def __init__(self, message, reason=None):
+        super(UnsupportedAlgorithm, self).__init__(message)
+        self._reason = reason
+
+
+class AlreadyFinalized(Exception):
+    pass
+
+
+class AlreadyUpdated(Exception):
+    pass
+
+
+class NotYetFinalized(Exception):
+    pass
+
+
+class InvalidTag(Exception):
+    pass
+
+
+class InvalidSignature(Exception):
+    pass
+
+
+class InternalError(Exception):
+    def __init__(self, msg, err_code):
+        super(InternalError, self).__init__(msg)
+        self.err_code = err_code
+
+
+class InvalidKey(Exception):
+    pass
diff --git a/server/www/packages/packages-common/cryptography/fernet.py b/server/www/packages/packages-common/cryptography/fernet.py
new file mode 100644
index 0000000..ac2dd0b
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/fernet.py
@@ -0,0 +1,173 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import base64
+import binascii
+import os
+import struct
+import time
+
+import six
+
+from cryptography.exceptions import InvalidSignature
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes, padding
+from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
+from cryptography.hazmat.primitives.hmac import HMAC
+
+
+class InvalidToken(Exception):
+    pass
+
+
+_MAX_CLOCK_SKEW = 60
+
+
+class Fernet(object):
+    def __init__(self, key, backend=None):
+        if backend is None:
+            backend = default_backend()
+
+        key = base64.urlsafe_b64decode(key)
+        if len(key) != 32:
+            raise ValueError(
+                "Fernet key must be 32 url-safe base64-encoded bytes."
+ ) + + self._signing_key = key[:16] + self._encryption_key = key[16:] + self._backend = backend + + @classmethod + def generate_key(cls): + return base64.urlsafe_b64encode(os.urandom(32)) + + def encrypt(self, data): + current_time = int(time.time()) + iv = os.urandom(16) + return self._encrypt_from_parts(data, current_time, iv) + + def _encrypt_from_parts(self, data, current_time, iv): + if not isinstance(data, bytes): + raise TypeError("data must be bytes.") + + padder = padding.PKCS7(algorithms.AES.block_size).padder() + padded_data = padder.update(data) + padder.finalize() + encryptor = Cipher( + algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend + ).encryptor() + ciphertext = encryptor.update(padded_data) + encryptor.finalize() + + basic_parts = ( + b"\x80" + struct.pack(">Q", current_time) + iv + ciphertext + ) + + h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend) + h.update(basic_parts) + hmac = h.finalize() + return base64.urlsafe_b64encode(basic_parts + hmac) + + def decrypt(self, token, ttl=None): + timestamp, data = Fernet._get_unverified_token_data(token) + return self._decrypt_data(data, timestamp, ttl) + + def extract_timestamp(self, token): + timestamp, data = Fernet._get_unverified_token_data(token) + # Verify the token was not tampered with. + self._verify_signature(data) + return timestamp + + @staticmethod + def _get_unverified_token_data(token): + if not isinstance(token, bytes): + raise TypeError("token must be bytes.") + + try: + data = base64.urlsafe_b64decode(token) + except (TypeError, binascii.Error): + raise InvalidToken + + if not data or six.indexbytes(data, 0) != 0x80: + raise InvalidToken + + try: + timestamp, = struct.unpack(">Q", data[1:9]) + except struct.error: + raise InvalidToken + return timestamp, data + + def _verify_signature(self, data): + h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend) + h.update(data[:-32]) + try: + h.verify(data[-32:]) + except InvalidSignature: + raise InvalidToken + + def _decrypt_data(self, data, timestamp, ttl): + current_time = int(time.time()) + if ttl is not None: + if timestamp + ttl < current_time: + raise InvalidToken + + if current_time + _MAX_CLOCK_SKEW < timestamp: + raise InvalidToken + + self._verify_signature(data) + + iv = data[9:25] + ciphertext = data[25:-32] + decryptor = Cipher( + algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend + ).decryptor() + plaintext_padded = decryptor.update(ciphertext) + try: + plaintext_padded += decryptor.finalize() + except ValueError: + raise InvalidToken + unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder() + + unpadded = unpadder.update(plaintext_padded) + try: + unpadded += unpadder.finalize() + except ValueError: + raise InvalidToken + return unpadded + + +class MultiFernet(object): + def __init__(self, fernets): + fernets = list(fernets) + if not fernets: + raise ValueError( + "MultiFernet requires at least one Fernet instance" + ) + self._fernets = fernets + + def encrypt(self, msg): + return self._fernets[0].encrypt(msg) + + def rotate(self, msg): + timestamp, data = Fernet._get_unverified_token_data(msg) + for f in self._fernets: + try: + p = f._decrypt_data(data, timestamp, None) + break + except InvalidToken: + pass + else: + raise InvalidToken + + iv = os.urandom(16) + return self._fernets[0]._encrypt_from_parts(p, timestamp, iv) + + def decrypt(self, msg, ttl=None): + for f in self._fernets: + try: + return f.decrypt(msg, ttl) + except InvalidToken: + pass + raise InvalidToken diff 
--git a/server/www/packages/packages-common/cryptography/hazmat/__init__.py b/server/www/packages/packages-common/cryptography/hazmat/__init__.py new file mode 100644 index 0000000..9f06a99 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/__init__.py @@ -0,0 +1,11 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +Hazardous Materials + +This is a "Hazardous Materials" module. You should ONLY use it if you're +100% absolutely sure that you know what you're doing because this module +is full of land mines, dragons, and dinosaurs with laser guns. +""" +from __future__ import absolute_import, division, print_function diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/__init__.py b/server/www/packages/packages-common/cryptography/hazmat/backends/__init__.py new file mode 100644 index 0000000..565bde7 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/__init__.py @@ -0,0 +1,18 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + + +_default_backend = None + + +def default_backend(): + global _default_backend + + if _default_backend is None: + from cryptography.hazmat.backends.openssl.backend import backend + _default_backend = backend + + return _default_backend diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/interfaces.py b/server/www/packages/packages-common/cryptography/hazmat/backends/interfaces.py new file mode 100644 index 0000000..0a476b9 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/interfaces.py @@ -0,0 +1,395 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class CipherBackend(object): + @abc.abstractmethod + def cipher_supported(self, cipher, mode): + """ + Return True if the given cipher and mode are supported. + """ + + @abc.abstractmethod + def create_symmetric_encryption_ctx(self, cipher, mode): + """ + Get a CipherContext that can be used for encryption. + """ + + @abc.abstractmethod + def create_symmetric_decryption_ctx(self, cipher, mode): + """ + Get a CipherContext that can be used for decryption. + """ + + +@six.add_metaclass(abc.ABCMeta) +class HashBackend(object): + @abc.abstractmethod + def hash_supported(self, algorithm): + """ + Return True if the hash algorithm is supported by this backend. + """ + + @abc.abstractmethod + def create_hash_ctx(self, algorithm): + """ + Create a HashContext for calculating a message digest. + """ + + +@six.add_metaclass(abc.ABCMeta) +class HMACBackend(object): + @abc.abstractmethod + def hmac_supported(self, algorithm): + """ + Return True if the hash algorithm is supported for HMAC by this + backend. + """ + + @abc.abstractmethod + def create_hmac_ctx(self, key, algorithm): + """ + Create a MACContext for calculating a message authentication code. 
+ """ + + +@six.add_metaclass(abc.ABCMeta) +class CMACBackend(object): + @abc.abstractmethod + def cmac_algorithm_supported(self, algorithm): + """ + Returns True if the block cipher is supported for CMAC by this backend + """ + + @abc.abstractmethod + def create_cmac_ctx(self, algorithm): + """ + Create a MACContext for calculating a message authentication code. + """ + + +@six.add_metaclass(abc.ABCMeta) +class PBKDF2HMACBackend(object): + @abc.abstractmethod + def pbkdf2_hmac_supported(self, algorithm): + """ + Return True if the hash algorithm is supported for PBKDF2 by this + backend. + """ + + @abc.abstractmethod + def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations, + key_material): + """ + Return length bytes derived from provided PBKDF2 parameters. + """ + + +@six.add_metaclass(abc.ABCMeta) +class RSABackend(object): + @abc.abstractmethod + def generate_rsa_private_key(self, public_exponent, key_size): + """ + Generate an RSAPrivateKey instance with public_exponent and a modulus + of key_size bits. + """ + + @abc.abstractmethod + def rsa_padding_supported(self, padding): + """ + Returns True if the backend supports the given padding options. + """ + + @abc.abstractmethod + def generate_rsa_parameters_supported(self, public_exponent, key_size): + """ + Returns True if the backend supports the given parameters for key + generation. + """ + + @abc.abstractmethod + def load_rsa_private_numbers(self, numbers): + """ + Returns an RSAPrivateKey provider. + """ + + @abc.abstractmethod + def load_rsa_public_numbers(self, numbers): + """ + Returns an RSAPublicKey provider. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DSABackend(object): + @abc.abstractmethod + def generate_dsa_parameters(self, key_size): + """ + Generate a DSAParameters instance with a modulus of key_size bits. + """ + + @abc.abstractmethod + def generate_dsa_private_key(self, parameters): + """ + Generate a DSAPrivateKey instance with parameters as a DSAParameters + object. + """ + + @abc.abstractmethod + def generate_dsa_private_key_and_parameters(self, key_size): + """ + Generate a DSAPrivateKey instance using key size only. + """ + + @abc.abstractmethod + def dsa_hash_supported(self, algorithm): + """ + Return True if the hash algorithm is supported by the backend for DSA. + """ + + @abc.abstractmethod + def dsa_parameters_supported(self, p, q, g): + """ + Return True if the parameters are supported by the backend for DSA. + """ + + @abc.abstractmethod + def load_dsa_private_numbers(self, numbers): + """ + Returns a DSAPrivateKey provider. + """ + + @abc.abstractmethod + def load_dsa_public_numbers(self, numbers): + """ + Returns a DSAPublicKey provider. + """ + + @abc.abstractmethod + def load_dsa_parameter_numbers(self, numbers): + """ + Returns a DSAParameters provider. + """ + + +@six.add_metaclass(abc.ABCMeta) +class EllipticCurveBackend(object): + @abc.abstractmethod + def elliptic_curve_signature_algorithm_supported( + self, signature_algorithm, curve + ): + """ + Returns True if the backend supports the named elliptic curve with the + specified signature algorithm. + """ + + @abc.abstractmethod + def elliptic_curve_supported(self, curve): + """ + Returns True if the backend supports the named elliptic curve. + """ + + @abc.abstractmethod + def generate_elliptic_curve_private_key(self, curve): + """ + Return an object conforming to the EllipticCurvePrivateKey interface. 
+ """ + + @abc.abstractmethod + def load_elliptic_curve_public_numbers(self, numbers): + """ + Return an EllipticCurvePublicKey provider using the given numbers. + """ + + @abc.abstractmethod + def load_elliptic_curve_private_numbers(self, numbers): + """ + Return an EllipticCurvePrivateKey provider using the given numbers. + """ + + @abc.abstractmethod + def elliptic_curve_exchange_algorithm_supported(self, algorithm, curve): + """ + Returns whether the exchange algorithm is supported by this backend. + """ + + @abc.abstractmethod + def derive_elliptic_curve_private_key(self, private_value, curve): + """ + Compute the private key given the private value and curve. + """ + + +@six.add_metaclass(abc.ABCMeta) +class PEMSerializationBackend(object): + @abc.abstractmethod + def load_pem_private_key(self, data, password): + """ + Loads a private key from PEM encoded data, using the provided password + if the data is encrypted. + """ + + @abc.abstractmethod + def load_pem_public_key(self, data): + """ + Loads a public key from PEM encoded data. + """ + + @abc.abstractmethod + def load_pem_parameters(self, data): + """ + Load encryption parameters from PEM encoded data. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DERSerializationBackend(object): + @abc.abstractmethod + def load_der_private_key(self, data, password): + """ + Loads a private key from DER encoded data. Uses the provided password + if the data is encrypted. + """ + + @abc.abstractmethod + def load_der_public_key(self, data): + """ + Loads a public key from DER encoded data. + """ + + @abc.abstractmethod + def load_der_parameters(self, data): + """ + Load encryption parameters from DER encoded data. + """ + + +@six.add_metaclass(abc.ABCMeta) +class X509Backend(object): + @abc.abstractmethod + def load_pem_x509_certificate(self, data): + """ + Load an X.509 certificate from PEM encoded data. + """ + + @abc.abstractmethod + def load_der_x509_certificate(self, data): + """ + Load an X.509 certificate from DER encoded data. + """ + + @abc.abstractmethod + def load_der_x509_csr(self, data): + """ + Load an X.509 CSR from DER encoded data. + """ + + @abc.abstractmethod + def load_pem_x509_csr(self, data): + """ + Load an X.509 CSR from PEM encoded data. + """ + + @abc.abstractmethod + def create_x509_csr(self, builder, private_key, algorithm): + """ + Create and sign an X.509 CSR from a CSR builder object. + """ + + @abc.abstractmethod + def create_x509_certificate(self, builder, private_key, algorithm): + """ + Create and sign an X.509 certificate from a CertificateBuilder object. + """ + + @abc.abstractmethod + def create_x509_crl(self, builder, private_key, algorithm): + """ + Create and sign an X.509 CertificateRevocationList from a + CertificateRevocationListBuilder object. + """ + + @abc.abstractmethod + def create_x509_revoked_certificate(self, builder): + """ + Create a RevokedCertificate object from a RevokedCertificateBuilder + object. + """ + + @abc.abstractmethod + def x509_name_bytes(self, name): + """ + Compute the DER encoded bytes of an X509 Name object. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DHBackend(object): + @abc.abstractmethod + def generate_dh_parameters(self, generator, key_size): + """ + Generate a DHParameters instance with a modulus of key_size bits. + Using the given generator. Often 2 or 5. + """ + + @abc.abstractmethod + def generate_dh_private_key(self, parameters): + """ + Generate a DHPrivateKey instance with parameters as a DHParameters + object. 
+        """
+
+    @abc.abstractmethod
+    def generate_dh_private_key_and_parameters(self, generator, key_size):
+        """
+        Generate a DHPrivateKey instance using the given key size and
+        generator (often 2 or 5).
+        """
+
+    @abc.abstractmethod
+    def load_dh_private_numbers(self, numbers):
+        """
+        Load a DHPrivateKey from DHPrivateNumbers.
+        """
+
+    @abc.abstractmethod
+    def load_dh_public_numbers(self, numbers):
+        """
+        Load a DHPublicKey from DHPublicNumbers.
+        """
+
+    @abc.abstractmethod
+    def load_dh_parameter_numbers(self, numbers):
+        """
+        Load DHParameters from DHParameterNumbers.
+        """
+
+    @abc.abstractmethod
+    def dh_parameters_supported(self, p, g, q=None):
+        """
+        Returns whether the backend supports DH with these parameter values.
+        """
+
+    @abc.abstractmethod
+    def dh_x942_serialization_supported(self):
+        """
+        Returns True if the backend supports the serialization of DH objects
+        with subgroup order (q).
+        """
+
+
+@six.add_metaclass(abc.ABCMeta)
+class ScryptBackend(object):
+    @abc.abstractmethod
+    def derive_scrypt(self, key_material, salt, length, n, r, p):
+        """
+        Return bytes derived from provided Scrypt parameters.
+        """
diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/__init__.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/__init__.py
new file mode 100644
index 0000000..8eadeb6
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/__init__.py
@@ -0,0 +1,10 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+from cryptography.hazmat.backends.openssl.backend import backend
+
+
+__all__ = ["backend"]
diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/aead.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/aead.py
new file mode 100644
index 0000000..9cec3e2
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/aead.py
@@ -0,0 +1,159 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
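+#
+# Usage sketch (illustrative; not part of the upstream file): the private
+# helpers below back the one-shot AEAD classes in
+# cryptography.hazmat.primitives.ciphers.aead, which are the intended entry
+# points. A typical AES-GCM round trip looks like:
+#
+#     import os
+#     from cryptography.hazmat.primitives.ciphers.aead import AESGCM
+#     key = AESGCM.generate_key(bit_length=128)
+#     nonce = os.urandom(12)  # 96-bit nonces are recommended for GCM
+#     ct = AESGCM(key).encrypt(nonce, b"plaintext", b"associated data")
+#     assert AESGCM(key).decrypt(nonce, ct, b"associated data") == b"plaintext"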
+
+from __future__ import absolute_import, division, print_function
+
+from cryptography.exceptions import InvalidTag
+
+
+_ENCRYPT = 1
+_DECRYPT = 0
+
+
+def _aead_cipher_name(cipher):
+    from cryptography.hazmat.primitives.ciphers.aead import (
+        AESCCM, AESGCM, ChaCha20Poly1305
+    )
+    if isinstance(cipher, ChaCha20Poly1305):
+        return b"chacha20-poly1305"
+    elif isinstance(cipher, AESCCM):
+        return "aes-{0}-ccm".format(len(cipher._key) * 8).encode("ascii")
+    else:
+        assert isinstance(cipher, AESGCM)
+        return "aes-{0}-gcm".format(len(cipher._key) * 8).encode("ascii")
+
+
+def _aead_setup(backend, cipher_name, key, nonce, tag, tag_len, operation):
+    evp_cipher = backend._lib.EVP_get_cipherbyname(cipher_name)
+    backend.openssl_assert(evp_cipher != backend._ffi.NULL)
+    ctx = backend._lib.EVP_CIPHER_CTX_new()
+    ctx = backend._ffi.gc(ctx, backend._lib.EVP_CIPHER_CTX_free)
+    res = backend._lib.EVP_CipherInit_ex(
+        ctx, evp_cipher,
+        backend._ffi.NULL,
+        backend._ffi.NULL,
+        backend._ffi.NULL,
+        int(operation == _ENCRYPT)
+    )
+    backend.openssl_assert(res != 0)
+    res = backend._lib.EVP_CIPHER_CTX_set_key_length(ctx, len(key))
+    backend.openssl_assert(res != 0)
+    res = backend._lib.EVP_CIPHER_CTX_ctrl(
+        ctx, backend._lib.EVP_CTRL_AEAD_SET_IVLEN, len(nonce),
+        backend._ffi.NULL
+    )
+    backend.openssl_assert(res != 0)
+    if operation == _DECRYPT:
+        res = backend._lib.EVP_CIPHER_CTX_ctrl(
+            ctx, backend._lib.EVP_CTRL_AEAD_SET_TAG, len(tag), tag
+        )
+        backend.openssl_assert(res != 0)
+    else:
+        res = backend._lib.EVP_CIPHER_CTX_ctrl(
+            ctx, backend._lib.EVP_CTRL_AEAD_SET_TAG, tag_len, backend._ffi.NULL
+        )
+
+    res = backend._lib.EVP_CipherInit_ex(
+        ctx,
+        backend._ffi.NULL,
+        backend._ffi.NULL,
+        key,
+        nonce,
+        int(operation == _ENCRYPT)
+    )
+    backend.openssl_assert(res != 0)
+    return ctx
+
+
+def _set_length(backend, ctx, data_len):
+    intptr = backend._ffi.new("int *")
+    res = backend._lib.EVP_CipherUpdate(
+        ctx,
+        backend._ffi.NULL,
+        intptr,
+        backend._ffi.NULL,
+        data_len
+    )
+    backend.openssl_assert(res != 0)
+
+
+def _process_aad(backend, ctx, associated_data):
+    outlen = backend._ffi.new("int *")
+    res = backend._lib.EVP_CipherUpdate(
+        ctx, backend._ffi.NULL, outlen, associated_data, len(associated_data)
+    )
+    backend.openssl_assert(res != 0)
+
+
+def _process_data(backend, ctx, data):
+    outlen = backend._ffi.new("int *")
+    buf = backend._ffi.new("unsigned char[]", len(data))
+    res = backend._lib.EVP_CipherUpdate(ctx, buf, outlen, data, len(data))
+    backend.openssl_assert(res != 0)
+    return backend._ffi.buffer(buf, outlen[0])[:]
+
+
+def _encrypt(backend, cipher, nonce, data, associated_data, tag_length):
+    from cryptography.hazmat.primitives.ciphers.aead import AESCCM
+    cipher_name = _aead_cipher_name(cipher)
+    ctx = _aead_setup(
+        backend, cipher_name, cipher._key, nonce, None, tag_length, _ENCRYPT
+    )
+    # CCM requires us to pass the length of the data before processing
+    # anything. However, calling this with any other AEAD results in an error.
+    if isinstance(cipher, AESCCM):
+        _set_length(backend, ctx, len(data))
+
+    _process_aad(backend, ctx, associated_data)
+    processed_data = _process_data(backend, ctx, data)
+    outlen = backend._ffi.new("int *")
+    res = backend._lib.EVP_CipherFinal_ex(ctx, backend._ffi.NULL, outlen)
+    backend.openssl_assert(res != 0)
+    backend.openssl_assert(outlen[0] == 0)
+    tag_buf = backend._ffi.new("unsigned char[]", tag_length)
+    res = backend._lib.EVP_CIPHER_CTX_ctrl(
+        ctx, backend._lib.EVP_CTRL_AEAD_GET_TAG, tag_length, tag_buf
+    )
+    backend.openssl_assert(res != 0)
+    tag = backend._ffi.buffer(tag_buf)[:]
+
+    return processed_data + tag
+
+
+def _decrypt(backend, cipher, nonce, data, associated_data, tag_length):
+    from cryptography.hazmat.primitives.ciphers.aead import AESCCM
+    if len(data) < tag_length:
+        raise InvalidTag
+    tag = data[-tag_length:]
+    data = data[:-tag_length]
+    cipher_name = _aead_cipher_name(cipher)
+    ctx = _aead_setup(
+        backend, cipher_name, cipher._key, nonce, tag, tag_length, _DECRYPT
+    )
+    # CCM requires us to pass the length of the data before processing
+    # anything. However, calling this with any other AEAD results in an error.
+    if isinstance(cipher, AESCCM):
+        _set_length(backend, ctx, len(data))
+
+    _process_aad(backend, ctx, associated_data)
+    # CCM has a different error path if the tag doesn't match. Errors are
+    # raised in Update, and Final is irrelevant.
+    if isinstance(cipher, AESCCM):
+        outlen = backend._ffi.new("int *")
+        buf = backend._ffi.new("unsigned char[]", len(data))
+        res = backend._lib.EVP_CipherUpdate(ctx, buf, outlen, data, len(data))
+        if res != 1:
+            backend._consume_errors()
+            raise InvalidTag
+
+        processed_data = backend._ffi.buffer(buf, outlen[0])[:]
+    else:
+        processed_data = _process_data(backend, ctx, data)
+        outlen = backend._ffi.new("int *")
+        res = backend._lib.EVP_CipherFinal_ex(ctx, backend._ffi.NULL, outlen)
+        if res == 0:
+            backend._consume_errors()
+            raise InvalidTag
+
+    return processed_data
diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/backend.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/backend.py
new file mode 100644
index 0000000..af14bfa
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/backend.py
@@ -0,0 +1,1974 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
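+#
+# Usage sketch (illustrative; not part of the upstream file): this module's
+# `backend` singleton is normally reached through the default_backend()
+# helper rather than instantiated directly, e.g.:
+#
+#     from cryptography.hazmat.backends import default_backend
+#     from cryptography.hazmat.primitives import hashes
+#     digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
+#     digest.update(b"abc")
+#     assert len(digest.finalize()) == 32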
+ +from __future__ import absolute_import, division, print_function + +import base64 +import calendar +import collections +import contextlib +import itertools +from contextlib import contextmanager + +import six + +from cryptography import utils, x509 +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.backends.interfaces import ( + CMACBackend, CipherBackend, DERSerializationBackend, DHBackend, DSABackend, + EllipticCurveBackend, HMACBackend, HashBackend, PBKDF2HMACBackend, + PEMSerializationBackend, RSABackend, ScryptBackend, X509Backend +) +from cryptography.hazmat.backends.openssl import aead +from cryptography.hazmat.backends.openssl.ciphers import _CipherContext +from cryptography.hazmat.backends.openssl.cmac import _CMACContext +from cryptography.hazmat.backends.openssl.decode_asn1 import _Integers +from cryptography.hazmat.backends.openssl.dh import ( + _DHParameters, _DHPrivateKey, _DHPublicKey, _dh_params_dup +) +from cryptography.hazmat.backends.openssl.dsa import ( + _DSAParameters, _DSAPrivateKey, _DSAPublicKey +) +from cryptography.hazmat.backends.openssl.ec import ( + _EllipticCurvePrivateKey, _EllipticCurvePublicKey +) +from cryptography.hazmat.backends.openssl.encode_asn1 import ( + _CRL_ENTRY_EXTENSION_ENCODE_HANDLERS, + _CRL_EXTENSION_ENCODE_HANDLERS, _EXTENSION_ENCODE_HANDLERS, + _encode_asn1_int_gc, _encode_asn1_str_gc, _encode_name_gc, _txt2obj_gc, +) +from cryptography.hazmat.backends.openssl.hashes import _HashContext +from cryptography.hazmat.backends.openssl.hmac import _HMACContext +from cryptography.hazmat.backends.openssl.rsa import ( + _RSAPrivateKey, _RSAPublicKey +) +from cryptography.hazmat.backends.openssl.x25519 import ( + _X25519PrivateKey, _X25519PublicKey +) +from cryptography.hazmat.backends.openssl.x509 import ( + _Certificate, _CertificateRevocationList, + _CertificateSigningRequest, _RevokedCertificate +) +from cryptography.hazmat.bindings.openssl import binding +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa +from cryptography.hazmat.primitives.asymmetric.padding import ( + MGF1, OAEP, PKCS1v15, PSS +) +from cryptography.hazmat.primitives.ciphers.algorithms import ( + AES, ARC4, Blowfish, CAST5, Camellia, ChaCha20, IDEA, SEED, TripleDES +) +from cryptography.hazmat.primitives.ciphers.modes import ( + CBC, CFB, CFB8, CTR, ECB, GCM, OFB, XTS +) +from cryptography.hazmat.primitives.kdf import scrypt + + +_MemoryBIO = collections.namedtuple("_MemoryBIO", ["bio", "char_ptr"]) + + +@utils.register_interface(CipherBackend) +@utils.register_interface(CMACBackend) +@utils.register_interface(DERSerializationBackend) +@utils.register_interface(DHBackend) +@utils.register_interface(DSABackend) +@utils.register_interface(EllipticCurveBackend) +@utils.register_interface(HashBackend) +@utils.register_interface(HMACBackend) +@utils.register_interface(PBKDF2HMACBackend) +@utils.register_interface(RSABackend) +@utils.register_interface(PEMSerializationBackend) +@utils.register_interface(X509Backend) +@utils.register_interface_if( + binding.Binding().lib.Cryptography_HAS_SCRYPT, ScryptBackend +) +class Backend(object): + """ + OpenSSL API binding interfaces. 
+ """ + name = "openssl" + + def __init__(self): + self._binding = binding.Binding() + self._ffi = self._binding.ffi + self._lib = self._binding.lib + + self._cipher_registry = {} + self._register_default_ciphers() + self.activate_osrandom_engine() + self._dh_types = [self._lib.EVP_PKEY_DH] + if self._lib.Cryptography_HAS_EVP_PKEY_DHX: + self._dh_types.append(self._lib.EVP_PKEY_DHX) + + def openssl_assert(self, ok): + return binding._openssl_assert(self._lib, ok) + + def activate_builtin_random(self): + # Obtain a new structural reference. + e = self._lib.ENGINE_get_default_RAND() + if e != self._ffi.NULL: + self._lib.ENGINE_unregister_RAND(e) + # Reset the RNG to use the new engine. + self._lib.RAND_cleanup() + # decrement the structural reference from get_default_RAND + res = self._lib.ENGINE_finish(e) + self.openssl_assert(res == 1) + + @contextlib.contextmanager + def _get_osurandom_engine(self): + # Fetches an engine by id and returns it. This creates a structural + # reference. + e = self._lib.ENGINE_by_id(self._binding._osrandom_engine_id) + self.openssl_assert(e != self._ffi.NULL) + # Initialize the engine for use. This adds a functional reference. + res = self._lib.ENGINE_init(e) + self.openssl_assert(res == 1) + + try: + yield e + finally: + # Decrement the structural ref incremented by ENGINE_by_id. + res = self._lib.ENGINE_free(e) + self.openssl_assert(res == 1) + # Decrement the functional ref incremented by ENGINE_init. + res = self._lib.ENGINE_finish(e) + self.openssl_assert(res == 1) + + def activate_osrandom_engine(self): + # Unregister and free the current engine. + self.activate_builtin_random() + with self._get_osurandom_engine() as e: + # Set the engine as the default RAND provider. + res = self._lib.ENGINE_set_default_RAND(e) + self.openssl_assert(res == 1) + # Reset the RNG to use the new engine. + self._lib.RAND_cleanup() + + def osrandom_engine_implementation(self): + buf = self._ffi.new("char[]", 64) + with self._get_osurandom_engine() as e: + res = self._lib.ENGINE_ctrl_cmd(e, b"get_implementation", + len(buf), buf, + self._ffi.NULL, 0) + self.openssl_assert(res > 0) + return self._ffi.string(buf).decode('ascii') + + def openssl_version_text(self): + """ + Friendly string name of the loaded OpenSSL library. This is not + necessarily the same version as it was compiled against. 
+ + Example: OpenSSL 1.0.1e 11 Feb 2013 + """ + return self._ffi.string( + self._lib.OpenSSL_version(self._lib.OPENSSL_VERSION) + ).decode("ascii") + + def openssl_version_number(self): + return self._lib.OpenSSL_version_num() + + def create_hmac_ctx(self, key, algorithm): + return _HMACContext(self, key, algorithm) + + def _build_openssl_digest_name(self, algorithm): + if algorithm.name == "blake2b" or algorithm.name == "blake2s": + alg = "{0}{1}".format( + algorithm.name, algorithm.digest_size * 8 + ).encode("ascii") + else: + alg = algorithm.name.encode("ascii") + + return alg + + def hash_supported(self, algorithm): + name = self._build_openssl_digest_name(algorithm) + digest = self._lib.EVP_get_digestbyname(name) + return digest != self._ffi.NULL + + def hmac_supported(self, algorithm): + return self.hash_supported(algorithm) + + def create_hash_ctx(self, algorithm): + return _HashContext(self, algorithm) + + def cipher_supported(self, cipher, mode): + try: + adapter = self._cipher_registry[type(cipher), type(mode)] + except KeyError: + return False + evp_cipher = adapter(self, cipher, mode) + return self._ffi.NULL != evp_cipher + + def register_cipher_adapter(self, cipher_cls, mode_cls, adapter): + if (cipher_cls, mode_cls) in self._cipher_registry: + raise ValueError("Duplicate registration for: {0} {1}.".format( + cipher_cls, mode_cls) + ) + self._cipher_registry[cipher_cls, mode_cls] = adapter + + def _register_default_ciphers(self): + for mode_cls in [CBC, CTR, ECB, OFB, CFB, CFB8, GCM]: + self.register_cipher_adapter( + AES, + mode_cls, + GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}") + ) + for mode_cls in [CBC, CTR, ECB, OFB, CFB]: + self.register_cipher_adapter( + Camellia, + mode_cls, + GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}") + ) + for mode_cls in [CBC, CFB, CFB8, OFB]: + self.register_cipher_adapter( + TripleDES, + mode_cls, + GetCipherByName("des-ede3-{mode.name}") + ) + self.register_cipher_adapter( + TripleDES, + ECB, + GetCipherByName("des-ede3") + ) + for mode_cls in [CBC, CFB, OFB, ECB]: + self.register_cipher_adapter( + Blowfish, + mode_cls, + GetCipherByName("bf-{mode.name}") + ) + for mode_cls in [CBC, CFB, OFB, ECB]: + self.register_cipher_adapter( + SEED, + mode_cls, + GetCipherByName("seed-{mode.name}") + ) + for cipher_cls, mode_cls in itertools.product( + [CAST5, IDEA], + [CBC, OFB, CFB, ECB], + ): + self.register_cipher_adapter( + cipher_cls, + mode_cls, + GetCipherByName("{cipher.name}-{mode.name}") + ) + self.register_cipher_adapter( + ARC4, + type(None), + GetCipherByName("rc4") + ) + self.register_cipher_adapter( + ChaCha20, + type(None), + GetCipherByName("chacha20") + ) + self.register_cipher_adapter(AES, XTS, _get_xts_cipher) + + def create_symmetric_encryption_ctx(self, cipher, mode): + return _CipherContext(self, cipher, mode, _CipherContext._ENCRYPT) + + def create_symmetric_decryption_ctx(self, cipher, mode): + return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT) + + def pbkdf2_hmac_supported(self, algorithm): + return self.hmac_supported(algorithm) + + def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations, + key_material): + buf = self._ffi.new("unsigned char[]", length) + evp_md = self._lib.EVP_get_digestbyname( + algorithm.name.encode("ascii")) + self.openssl_assert(evp_md != self._ffi.NULL) + res = self._lib.PKCS5_PBKDF2_HMAC( + key_material, + len(key_material), + salt, + len(salt), + iterations, + evp_md, + length, + buf + ) + self.openssl_assert(res == 1) + return 
self._ffi.buffer(buf)[:] + + def _consume_errors(self): + return binding._consume_errors(self._lib) + + def _bn_to_int(self, bn): + assert bn != self._ffi.NULL + + if not six.PY2: + # Python 3 has constant time from_bytes, so use that. + bn_num_bytes = self._lib.BN_num_bytes(bn) + bin_ptr = self._ffi.new("unsigned char[]", bn_num_bytes) + bin_len = self._lib.BN_bn2bin(bn, bin_ptr) + # A zero length means the BN has value 0 + self.openssl_assert(bin_len >= 0) + return int.from_bytes(self._ffi.buffer(bin_ptr)[:bin_len], "big") + else: + # Under Python 2 the best we can do is hex() + hex_cdata = self._lib.BN_bn2hex(bn) + self.openssl_assert(hex_cdata != self._ffi.NULL) + hex_str = self._ffi.string(hex_cdata) + self._lib.OPENSSL_free(hex_cdata) + return int(hex_str, 16) + + def _int_to_bn(self, num, bn=None): + """ + Converts a python integer to a BIGNUM. The returned BIGNUM will not + be garbage collected (to support adding them to structs that take + ownership of the object). Be sure to register it for GC if it will + be discarded after use. + """ + assert bn is None or bn != self._ffi.NULL + + if bn is None: + bn = self._ffi.NULL + + if not six.PY2: + # Python 3 has constant time to_bytes, so use that. + + binary = num.to_bytes(int(num.bit_length() / 8.0 + 1), "big") + bn_ptr = self._lib.BN_bin2bn(binary, len(binary), bn) + self.openssl_assert(bn_ptr != self._ffi.NULL) + return bn_ptr + + else: + # Under Python 2 the best we can do is hex(), [2:] removes the 0x + # prefix. + hex_num = hex(num).rstrip("L")[2:].encode("ascii") + bn_ptr = self._ffi.new("BIGNUM **") + bn_ptr[0] = bn + res = self._lib.BN_hex2bn(bn_ptr, hex_num) + self.openssl_assert(res != 0) + self.openssl_assert(bn_ptr[0] != self._ffi.NULL) + return bn_ptr[0] + + def generate_rsa_private_key(self, public_exponent, key_size): + rsa._verify_rsa_parameters(public_exponent, key_size) + + rsa_cdata = self._lib.RSA_new() + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + + bn = self._int_to_bn(public_exponent) + bn = self._ffi.gc(bn, self._lib.BN_free) + + res = self._lib.RSA_generate_key_ex( + rsa_cdata, key_size, bn, self._ffi.NULL + ) + self.openssl_assert(res == 1) + evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata) + + return _RSAPrivateKey(self, rsa_cdata, evp_pkey) + + def generate_rsa_parameters_supported(self, public_exponent, key_size): + return (public_exponent >= 3 and public_exponent & 1 != 0 and + key_size >= 512) + + def load_rsa_private_numbers(self, numbers): + rsa._check_private_key_components( + numbers.p, + numbers.q, + numbers.d, + numbers.dmp1, + numbers.dmq1, + numbers.iqmp, + numbers.public_numbers.e, + numbers.public_numbers.n + ) + rsa_cdata = self._lib.RSA_new() + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + p = self._int_to_bn(numbers.p) + q = self._int_to_bn(numbers.q) + d = self._int_to_bn(numbers.d) + dmp1 = self._int_to_bn(numbers.dmp1) + dmq1 = self._int_to_bn(numbers.dmq1) + iqmp = self._int_to_bn(numbers.iqmp) + e = self._int_to_bn(numbers.public_numbers.e) + n = self._int_to_bn(numbers.public_numbers.n) + res = self._lib.RSA_set0_factors(rsa_cdata, p, q) + self.openssl_assert(res == 1) + res = self._lib.RSA_set0_key(rsa_cdata, n, e, d) + self.openssl_assert(res == 1) + res = self._lib.RSA_set0_crt_params(rsa_cdata, dmp1, dmq1, iqmp) + self.openssl_assert(res == 1) + res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL) + self.openssl_assert(res == 1) + evp_pkey = 
self._rsa_cdata_to_evp_pkey(rsa_cdata) + + return _RSAPrivateKey(self, rsa_cdata, evp_pkey) + + def load_rsa_public_numbers(self, numbers): + rsa._check_public_key_components(numbers.e, numbers.n) + rsa_cdata = self._lib.RSA_new() + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + e = self._int_to_bn(numbers.e) + n = self._int_to_bn(numbers.n) + res = self._lib.RSA_set0_key(rsa_cdata, n, e, self._ffi.NULL) + self.openssl_assert(res == 1) + evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata) + + return _RSAPublicKey(self, rsa_cdata, evp_pkey) + + def _create_evp_pkey_gc(self): + evp_pkey = self._lib.EVP_PKEY_new() + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + return evp_pkey + + def _rsa_cdata_to_evp_pkey(self, rsa_cdata): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set1_RSA(evp_pkey, rsa_cdata) + self.openssl_assert(res == 1) + return evp_pkey + + def _bytes_to_bio(self, data): + """ + Return a _MemoryBIO namedtuple of (BIO, char*). + + The char* is the storage for the BIO and it must stay alive until the + BIO is finished with. + """ + data_char_p = self._ffi.new("char[]", data) + bio = self._lib.BIO_new_mem_buf( + data_char_p, len(data) + ) + self.openssl_assert(bio != self._ffi.NULL) + + return _MemoryBIO(self._ffi.gc(bio, self._lib.BIO_free), data_char_p) + + def _create_mem_bio_gc(self): + """ + Creates an empty memory BIO. + """ + bio_method = self._lib.BIO_s_mem() + self.openssl_assert(bio_method != self._ffi.NULL) + bio = self._lib.BIO_new(bio_method) + self.openssl_assert(bio != self._ffi.NULL) + bio = self._ffi.gc(bio, self._lib.BIO_free) + return bio + + def _read_mem_bio(self, bio): + """ + Reads a memory BIO. This only works on memory BIOs. + """ + buf = self._ffi.new("char **") + buf_len = self._lib.BIO_get_mem_data(bio, buf) + self.openssl_assert(buf_len > 0) + self.openssl_assert(buf[0] != self._ffi.NULL) + bio_data = self._ffi.buffer(buf[0], buf_len)[:] + return bio_data + + def _evp_pkey_to_private_key(self, evp_pkey): + """ + Return the appropriate type of PrivateKey given an evp_pkey cdata + pointer. + """ + + key_type = self._lib.EVP_PKEY_id(evp_pkey) + + if key_type == self._lib.EVP_PKEY_RSA: + rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey) + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + return _RSAPrivateKey(self, rsa_cdata, evp_pkey) + elif key_type == self._lib.EVP_PKEY_DSA: + dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey) + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + return _DSAPrivateKey(self, dsa_cdata, evp_pkey) + elif key_type == self._lib.EVP_PKEY_EC: + ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + elif key_type in self._dh_types: + dh_cdata = self._lib.EVP_PKEY_get1_DH(evp_pkey) + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + return _DHPrivateKey(self, dh_cdata, evp_pkey) + else: + raise UnsupportedAlgorithm("Unsupported key type.") + + def _evp_pkey_to_public_key(self, evp_pkey): + """ + Return the appropriate type of PublicKey given an evp_pkey cdata + pointer. 
+ """ + + key_type = self._lib.EVP_PKEY_id(evp_pkey) + + if key_type == self._lib.EVP_PKEY_RSA: + rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey) + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + return _RSAPublicKey(self, rsa_cdata, evp_pkey) + elif key_type == self._lib.EVP_PKEY_DSA: + dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey) + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + return _DSAPublicKey(self, dsa_cdata, evp_pkey) + elif key_type == self._lib.EVP_PKEY_EC: + ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey) + elif key_type in self._dh_types: + dh_cdata = self._lib.EVP_PKEY_get1_DH(evp_pkey) + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + return _DHPublicKey(self, dh_cdata, evp_pkey) + else: + raise UnsupportedAlgorithm("Unsupported key type.") + + def _oaep_hash_supported(self, algorithm): + if self._lib.Cryptography_HAS_RSA_OAEP_MD: + return isinstance( + algorithm, ( + hashes.SHA1, + hashes.SHA224, + hashes.SHA256, + hashes.SHA384, + hashes.SHA512, + ) + ) + else: + return isinstance(algorithm, hashes.SHA1) + + def rsa_padding_supported(self, padding): + if isinstance(padding, PKCS1v15): + return True + elif isinstance(padding, PSS) and isinstance(padding._mgf, MGF1): + return self.hash_supported(padding._mgf._algorithm) + elif isinstance(padding, OAEP) and isinstance(padding._mgf, MGF1): + return ( + self._oaep_hash_supported(padding._mgf._algorithm) and + self._oaep_hash_supported(padding._algorithm) and + ( + (padding._label is None or len(padding._label) == 0) or + self._lib.Cryptography_HAS_RSA_OAEP_LABEL == 1 + ) + ) + else: + return False + + def generate_dsa_parameters(self, key_size): + if key_size not in (1024, 2048, 3072): + raise ValueError("Key size must be 1024 or 2048 or 3072 bits.") + + ctx = self._lib.DSA_new() + self.openssl_assert(ctx != self._ffi.NULL) + ctx = self._ffi.gc(ctx, self._lib.DSA_free) + + res = self._lib.DSA_generate_parameters_ex( + ctx, key_size, self._ffi.NULL, 0, + self._ffi.NULL, self._ffi.NULL, self._ffi.NULL + ) + + self.openssl_assert(res == 1) + + return _DSAParameters(self, ctx) + + def generate_dsa_private_key(self, parameters): + ctx = self._lib.DSAparams_dup(parameters._dsa_cdata) + self.openssl_assert(ctx != self._ffi.NULL) + ctx = self._ffi.gc(ctx, self._lib.DSA_free) + self._lib.DSA_generate_key(ctx) + evp_pkey = self._dsa_cdata_to_evp_pkey(ctx) + + return _DSAPrivateKey(self, ctx, evp_pkey) + + def generate_dsa_private_key_and_parameters(self, key_size): + parameters = self.generate_dsa_parameters(key_size) + return self.generate_dsa_private_key(parameters) + + def _dsa_cdata_set_values(self, dsa_cdata, p, q, g, pub_key, priv_key): + res = self._lib.DSA_set0_pqg(dsa_cdata, p, q, g) + self.openssl_assert(res == 1) + res = self._lib.DSA_set0_key(dsa_cdata, pub_key, priv_key) + self.openssl_assert(res == 1) + + def load_dsa_private_numbers(self, numbers): + dsa._check_dsa_private_numbers(numbers) + parameter_numbers = numbers.public_numbers.parameter_numbers + + dsa_cdata = self._lib.DSA_new() + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + + p = self._int_to_bn(parameter_numbers.p) + q = 
self._int_to_bn(parameter_numbers.q) + g = self._int_to_bn(parameter_numbers.g) + pub_key = self._int_to_bn(numbers.public_numbers.y) + priv_key = self._int_to_bn(numbers.x) + self._dsa_cdata_set_values(dsa_cdata, p, q, g, pub_key, priv_key) + + evp_pkey = self._dsa_cdata_to_evp_pkey(dsa_cdata) + + return _DSAPrivateKey(self, dsa_cdata, evp_pkey) + + def load_dsa_public_numbers(self, numbers): + dsa._check_dsa_parameters(numbers.parameter_numbers) + dsa_cdata = self._lib.DSA_new() + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + + p = self._int_to_bn(numbers.parameter_numbers.p) + q = self._int_to_bn(numbers.parameter_numbers.q) + g = self._int_to_bn(numbers.parameter_numbers.g) + pub_key = self._int_to_bn(numbers.y) + priv_key = self._ffi.NULL + self._dsa_cdata_set_values(dsa_cdata, p, q, g, pub_key, priv_key) + + evp_pkey = self._dsa_cdata_to_evp_pkey(dsa_cdata) + + return _DSAPublicKey(self, dsa_cdata, evp_pkey) + + def load_dsa_parameter_numbers(self, numbers): + dsa._check_dsa_parameters(numbers) + dsa_cdata = self._lib.DSA_new() + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + + p = self._int_to_bn(numbers.p) + q = self._int_to_bn(numbers.q) + g = self._int_to_bn(numbers.g) + res = self._lib.DSA_set0_pqg(dsa_cdata, p, q, g) + self.openssl_assert(res == 1) + + return _DSAParameters(self, dsa_cdata) + + def _dsa_cdata_to_evp_pkey(self, dsa_cdata): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set1_DSA(evp_pkey, dsa_cdata) + self.openssl_assert(res == 1) + return evp_pkey + + def dsa_hash_supported(self, algorithm): + return self.hash_supported(algorithm) + + def dsa_parameters_supported(self, p, q, g): + return True + + def cmac_algorithm_supported(self, algorithm): + return self.cipher_supported( + algorithm, CBC(b"\x00" * algorithm.block_size) + ) + + def create_cmac_ctx(self, algorithm): + return _CMACContext(self, algorithm) + + def create_x509_csr(self, builder, private_key, algorithm): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError('Algorithm must be a registered hash algorithm.') + + if ( + isinstance(algorithm, hashes.MD5) and not + isinstance(private_key, rsa.RSAPrivateKey) + ): + raise ValueError( + "MD5 is not a supported hash algorithm for EC/DSA CSRs" + ) + + # Resolve the signature algorithm. + evp_md = self._lib.EVP_get_digestbyname( + algorithm.name.encode('ascii') + ) + self.openssl_assert(evp_md != self._ffi.NULL) + + # Create an empty request. + x509_req = self._lib.X509_REQ_new() + self.openssl_assert(x509_req != self._ffi.NULL) + x509_req = self._ffi.gc(x509_req, self._lib.X509_REQ_free) + + # Set x509 version. + res = self._lib.X509_REQ_set_version(x509_req, x509.Version.v1.value) + self.openssl_assert(res == 1) + + # Set subject name. + res = self._lib.X509_REQ_set_subject_name( + x509_req, _encode_name_gc(self, builder._subject_name) + ) + self.openssl_assert(res == 1) + + # Set subject public key. + public_key = private_key.public_key() + res = self._lib.X509_REQ_set_pubkey( + x509_req, public_key._evp_pkey + ) + self.openssl_assert(res == 1) + + # Add extensions. + sk_extension = self._lib.sk_X509_EXTENSION_new_null() + self.openssl_assert(sk_extension != self._ffi.NULL) + sk_extension = self._ffi.gc( + sk_extension, self._lib.sk_X509_EXTENSION_free + ) + # gc is not necessary for CSRs, as sk_X509_EXTENSION_free + # will release all the X509_EXTENSIONs. 
+ self._create_x509_extensions( + extensions=builder._extensions, + handlers=_EXTENSION_ENCODE_HANDLERS, + x509_obj=sk_extension, + add_func=self._lib.sk_X509_EXTENSION_insert, + gc=False + ) + res = self._lib.X509_REQ_add_extensions(x509_req, sk_extension) + self.openssl_assert(res == 1) + + # Sign the request using the requester's private key. + res = self._lib.X509_REQ_sign( + x509_req, private_key._evp_pkey, evp_md + ) + if res == 0: + errors = self._consume_errors() + self.openssl_assert( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_RSA, + self._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY + ) + ) + + raise ValueError("Digest too big for RSA key") + + return _CertificateSigningRequest(self, x509_req) + + def create_x509_certificate(self, builder, private_key, algorithm): + if not isinstance(builder, x509.CertificateBuilder): + raise TypeError('Builder type mismatch.') + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError('Algorithm must be a registered hash algorithm.') + + if ( + isinstance(algorithm, hashes.MD5) and not + isinstance(private_key, rsa.RSAPrivateKey) + ): + raise ValueError( + "MD5 is not a supported hash algorithm for EC/DSA certificates" + ) + + # Resolve the signature algorithm. + evp_md = self._lib.EVP_get_digestbyname( + algorithm.name.encode('ascii') + ) + self.openssl_assert(evp_md != self._ffi.NULL) + + # Create an empty certificate. + x509_cert = self._lib.X509_new() + x509_cert = self._ffi.gc(x509_cert, backend._lib.X509_free) + + # Set the x509 version. + res = self._lib.X509_set_version(x509_cert, builder._version.value) + self.openssl_assert(res == 1) + + # Set the subject's name. + res = self._lib.X509_set_subject_name( + x509_cert, _encode_name_gc(self, builder._subject_name) + ) + self.openssl_assert(res == 1) + + # Set the subject's public key. + res = self._lib.X509_set_pubkey( + x509_cert, builder._public_key._evp_pkey + ) + self.openssl_assert(res == 1) + + # Set the certificate serial number. + serial_number = _encode_asn1_int_gc(self, builder._serial_number) + res = self._lib.X509_set_serialNumber(x509_cert, serial_number) + self.openssl_assert(res == 1) + + # Set the "not before" time. + res = self._lib.ASN1_TIME_set( + self._lib.X509_get_notBefore(x509_cert), + calendar.timegm(builder._not_valid_before.timetuple()) + ) + if res == self._ffi.NULL: + self._raise_time_set_error() + + # Set the "not after" time. + res = self._lib.ASN1_TIME_set( + self._lib.X509_get_notAfter(x509_cert), + calendar.timegm(builder._not_valid_after.timetuple()) + ) + if res == self._ffi.NULL: + self._raise_time_set_error() + + # Add extensions. + self._create_x509_extensions( + extensions=builder._extensions, + handlers=_EXTENSION_ENCODE_HANDLERS, + x509_obj=x509_cert, + add_func=self._lib.X509_add_ext, + gc=True + ) + + # Set the issuer name. + res = self._lib.X509_set_issuer_name( + x509_cert, _encode_name_gc(self, builder._issuer_name) + ) + self.openssl_assert(res == 1) + + # Sign the certificate with the issuer's private key. 
+ res = self._lib.X509_sign( + x509_cert, private_key._evp_pkey, evp_md + ) + if res == 0: + errors = self._consume_errors() + self.openssl_assert( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_RSA, + self._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY + ) + ) + raise ValueError("Digest too big for RSA key") + + return _Certificate(self, x509_cert) + + def _raise_time_set_error(self): + errors = self._consume_errors() + self.openssl_assert( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_ASN1, + self._lib.ASN1_R_ERROR_GETTING_TIME + ) + ) + raise ValueError( + "Invalid time. This error can occur if you set a time too far in " + "the future on Windows." + ) + + def create_x509_crl(self, builder, private_key, algorithm): + if not isinstance(builder, x509.CertificateRevocationListBuilder): + raise TypeError('Builder type mismatch.') + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError('Algorithm must be a registered hash algorithm.') + + if ( + isinstance(algorithm, hashes.MD5) and not + isinstance(private_key, rsa.RSAPrivateKey) + ): + raise ValueError( + "MD5 is not a supported hash algorithm for EC/DSA CRLs" + ) + + evp_md = self._lib.EVP_get_digestbyname( + algorithm.name.encode('ascii') + ) + self.openssl_assert(evp_md != self._ffi.NULL) + + # Create an empty CRL. + x509_crl = self._lib.X509_CRL_new() + x509_crl = self._ffi.gc(x509_crl, backend._lib.X509_CRL_free) + + # Set the x509 CRL version. We only support v2 (integer value 1). + res = self._lib.X509_CRL_set_version(x509_crl, 1) + self.openssl_assert(res == 1) + + # Set the issuer name. + res = self._lib.X509_CRL_set_issuer_name( + x509_crl, _encode_name_gc(self, builder._issuer_name) + ) + self.openssl_assert(res == 1) + + # Set the last update time. + last_update = self._lib.ASN1_TIME_set( + self._ffi.NULL, calendar.timegm(builder._last_update.timetuple()) + ) + self.openssl_assert(last_update != self._ffi.NULL) + last_update = self._ffi.gc(last_update, self._lib.ASN1_TIME_free) + res = self._lib.X509_CRL_set_lastUpdate(x509_crl, last_update) + self.openssl_assert(res == 1) + + # Set the next update time. + next_update = self._lib.ASN1_TIME_set( + self._ffi.NULL, calendar.timegm(builder._next_update.timetuple()) + ) + self.openssl_assert(next_update != self._ffi.NULL) + next_update = self._ffi.gc(next_update, self._lib.ASN1_TIME_free) + res = self._lib.X509_CRL_set_nextUpdate(x509_crl, next_update) + self.openssl_assert(res == 1) + + # Add extensions. + self._create_x509_extensions( + extensions=builder._extensions, + handlers=_CRL_EXTENSION_ENCODE_HANDLERS, + x509_obj=x509_crl, + add_func=self._lib.X509_CRL_add_ext, + gc=True + ) + + # add revoked certificates + for revoked_cert in builder._revoked_certificates: + # Duplicating because the X509_CRL takes ownership and will free + # this memory when X509_CRL_free is called. 
+ revoked = self._lib.Cryptography_X509_REVOKED_dup( + revoked_cert._x509_revoked + ) + self.openssl_assert(revoked != self._ffi.NULL) + res = self._lib.X509_CRL_add0_revoked(x509_crl, revoked) + self.openssl_assert(res == 1) + + res = self._lib.X509_CRL_sign( + x509_crl, private_key._evp_pkey, evp_md + ) + if res == 0: + errors = self._consume_errors() + self.openssl_assert( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_RSA, + self._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY + ) + ) + raise ValueError("Digest too big for RSA key") + + return _CertificateRevocationList(self, x509_crl) + + def _create_x509_extensions(self, extensions, handlers, x509_obj, + add_func, gc): + for i, extension in enumerate(extensions): + x509_extension = self._create_x509_extension( + handlers, extension + ) + self.openssl_assert(x509_extension != self._ffi.NULL) + + if gc: + x509_extension = self._ffi.gc( + x509_extension, self._lib.X509_EXTENSION_free + ) + res = add_func(x509_obj, x509_extension, i) + self.openssl_assert(res >= 1) + + def _create_raw_x509_extension(self, extension, value): + obj = _txt2obj_gc(self, extension.oid.dotted_string) + return self._lib.X509_EXTENSION_create_by_OBJ( + self._ffi.NULL, obj, 1 if extension.critical else 0, value + ) + + def _create_x509_extension(self, handlers, extension): + if isinstance(extension.value, x509.UnrecognizedExtension): + value = _encode_asn1_str_gc( + self, extension.value.value, len(extension.value.value) + ) + return self._create_raw_x509_extension(extension, value) + elif isinstance(extension.value, x509.TLSFeature): + asn1 = _Integers([x.value for x in extension.value]).dump() + value = _encode_asn1_str_gc(self, asn1, len(asn1)) + return self._create_raw_x509_extension(extension, value) + else: + try: + encode = handlers[extension.oid] + except KeyError: + raise NotImplementedError( + 'Extension not supported: {0}'.format(extension.oid) + ) + + ext_struct = encode(self, extension.value) + nid = self._lib.OBJ_txt2nid( + extension.oid.dotted_string.encode("ascii") + ) + backend.openssl_assert(nid != self._lib.NID_undef) + return self._lib.X509V3_EXT_i2d( + nid, 1 if extension.critical else 0, ext_struct + ) + + def create_x509_revoked_certificate(self, builder): + if not isinstance(builder, x509.RevokedCertificateBuilder): + raise TypeError('Builder type mismatch.') + + x509_revoked = self._lib.X509_REVOKED_new() + self.openssl_assert(x509_revoked != self._ffi.NULL) + x509_revoked = self._ffi.gc(x509_revoked, self._lib.X509_REVOKED_free) + serial_number = _encode_asn1_int_gc(self, builder._serial_number) + res = self._lib.X509_REVOKED_set_serialNumber( + x509_revoked, serial_number + ) + self.openssl_assert(res == 1) + rev_date = self._lib.ASN1_TIME_set( + self._ffi.NULL, + calendar.timegm(builder._revocation_date.timetuple()) + ) + self.openssl_assert(rev_date != self._ffi.NULL) + rev_date = self._ffi.gc(rev_date, self._lib.ASN1_TIME_free) + res = self._lib.X509_REVOKED_set_revocationDate(x509_revoked, rev_date) + self.openssl_assert(res == 1) + # add CRL entry extensions + self._create_x509_extensions( + extensions=builder._extensions, + handlers=_CRL_ENTRY_EXTENSION_ENCODE_HANDLERS, + x509_obj=x509_revoked, + add_func=self._lib.X509_REVOKED_add_ext, + gc=True + ) + return _RevokedCertificate(self, None, x509_revoked) + + def load_pem_private_key(self, data, password): + return self._load_key( + self._lib.PEM_read_bio_PrivateKey, + self._evp_pkey_to_private_key, + data, + password, + ) + + def load_pem_public_key(self, data): + mem_bio = 
self._bytes_to_bio(data) + evp_pkey = self._lib.PEM_read_bio_PUBKEY( + mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL + ) + if evp_pkey != self._ffi.NULL: + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + return self._evp_pkey_to_public_key(evp_pkey) + else: + # It's not a (RSA/DSA/ECDSA) subjectPublicKeyInfo, but we still + # need to check to see if it is a pure PKCS1 RSA public key (not + # embedded in a subjectPublicKeyInfo) + self._consume_errors() + res = self._lib.BIO_reset(mem_bio.bio) + self.openssl_assert(res == 1) + rsa_cdata = self._lib.PEM_read_bio_RSAPublicKey( + mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL + ) + if rsa_cdata != self._ffi.NULL: + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata) + return _RSAPublicKey(self, rsa_cdata, evp_pkey) + else: + self._handle_key_loading_error() + + def load_pem_parameters(self, data): + mem_bio = self._bytes_to_bio(data) + # only DH is supported currently + dh_cdata = self._lib.PEM_read_bio_DHparams( + mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL) + if dh_cdata != self._ffi.NULL: + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + return _DHParameters(self, dh_cdata) + else: + self._handle_key_loading_error() + + def load_der_private_key(self, data, password): + # OpenSSL has a function called d2i_AutoPrivateKey that in theory + # handles this automatically, however it doesn't handle encrypted + # private keys. Instead we try to load the key two different ways. + # First we'll try to load it as a traditional key. + bio_data = self._bytes_to_bio(data) + key = self._evp_pkey_from_der_traditional_key(bio_data, password) + if key: + return self._evp_pkey_to_private_key(key) + else: + # Finally we try to load it with the method that handles encrypted + # PKCS8 properly. + return self._load_key( + self._lib.d2i_PKCS8PrivateKey_bio, + self._evp_pkey_to_private_key, + data, + password, + ) + + def _evp_pkey_from_der_traditional_key(self, bio_data, password): + key = self._lib.d2i_PrivateKey_bio(bio_data.bio, self._ffi.NULL) + if key != self._ffi.NULL: + key = self._ffi.gc(key, self._lib.EVP_PKEY_free) + if password is not None: + raise TypeError( + "Password was given but private key is not encrypted." 
+                )
+
+            return key
+        else:
+            self._consume_errors()
+            return None
+
+    def load_der_public_key(self, data):
+        mem_bio = self._bytes_to_bio(data)
+        evp_pkey = self._lib.d2i_PUBKEY_bio(mem_bio.bio, self._ffi.NULL)
+        if evp_pkey != self._ffi.NULL:
+            evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
+            return self._evp_pkey_to_public_key(evp_pkey)
+        else:
+            # It's not a (RSA/DSA/ECDSA) subjectPublicKeyInfo, but we still
+            # need to check to see if it is a pure PKCS1 RSA public key (not
+            # embedded in a subjectPublicKeyInfo)
+            self._consume_errors()
+            res = self._lib.BIO_reset(mem_bio.bio)
+            self.openssl_assert(res == 1)
+            rsa_cdata = self._lib.d2i_RSAPublicKey_bio(
+                mem_bio.bio, self._ffi.NULL
+            )
+            if rsa_cdata != self._ffi.NULL:
+                rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
+                evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
+                return _RSAPublicKey(self, rsa_cdata, evp_pkey)
+            else:
+                self._handle_key_loading_error()
+
+    def load_der_parameters(self, data):
+        mem_bio = self._bytes_to_bio(data)
+        dh_cdata = self._lib.d2i_DHparams_bio(
+            mem_bio.bio, self._ffi.NULL
+        )
+        if dh_cdata != self._ffi.NULL:
+            dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free)
+            return _DHParameters(self, dh_cdata)
+        elif self._lib.Cryptography_HAS_EVP_PKEY_DHX:
+            # We check to see if the data is DHX (X9.42) parameters.
+            self._consume_errors()
+            res = self._lib.BIO_reset(mem_bio.bio)
+            self.openssl_assert(res == 1)
+            dh_cdata = self._lib.Cryptography_d2i_DHxparams_bio(
+                mem_bio.bio, self._ffi.NULL
+            )
+            if dh_cdata != self._ffi.NULL:
+                dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free)
+                return _DHParameters(self, dh_cdata)
+
+        self._handle_key_loading_error()
+
+    def load_pem_x509_certificate(self, data):
+        mem_bio = self._bytes_to_bio(data)
+        x509 = self._lib.PEM_read_bio_X509(
+            mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
+        )
+        if x509 == self._ffi.NULL:
+            self._consume_errors()
+            raise ValueError("Unable to load certificate")
+
+        x509 = self._ffi.gc(x509, self._lib.X509_free)
+        return _Certificate(self, x509)
+
+    def load_der_x509_certificate(self, data):
+        mem_bio = self._bytes_to_bio(data)
+        x509 = self._lib.d2i_X509_bio(mem_bio.bio, self._ffi.NULL)
+        if x509 == self._ffi.NULL:
+            self._consume_errors()
+            raise ValueError("Unable to load certificate")
+
+        x509 = self._ffi.gc(x509, self._lib.X509_free)
+        return _Certificate(self, x509)
+
+    def load_pem_x509_crl(self, data):
+        mem_bio = self._bytes_to_bio(data)
+        x509_crl = self._lib.PEM_read_bio_X509_CRL(
+            mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
+        )
+        if x509_crl == self._ffi.NULL:
+            self._consume_errors()
+            raise ValueError("Unable to load CRL")
+
+        x509_crl = self._ffi.gc(x509_crl, self._lib.X509_CRL_free)
+        return _CertificateRevocationList(self, x509_crl)
+
+    def load_der_x509_crl(self, data):
+        mem_bio = self._bytes_to_bio(data)
+        x509_crl = self._lib.d2i_X509_CRL_bio(mem_bio.bio, self._ffi.NULL)
+        if x509_crl == self._ffi.NULL:
+            self._consume_errors()
+            raise ValueError("Unable to load CRL")
+
+        x509_crl = self._ffi.gc(x509_crl, self._lib.X509_CRL_free)
+        return _CertificateRevocationList(self, x509_crl)
+
+    def load_pem_x509_csr(self, data):
+        mem_bio = self._bytes_to_bio(data)
+        x509_req = self._lib.PEM_read_bio_X509_REQ(
+            mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
+        )
+        if x509_req == self._ffi.NULL:
+            self._consume_errors()
+            raise ValueError("Unable to load request")
+
+        x509_req = self._ffi.gc(x509_req, self._lib.X509_REQ_free)
+        return _CertificateSigningRequest(self, x509_req)
+
+    def 
load_der_x509_csr(self, data): + mem_bio = self._bytes_to_bio(data) + x509_req = self._lib.d2i_X509_REQ_bio(mem_bio.bio, self._ffi.NULL) + if x509_req == self._ffi.NULL: + self._consume_errors() + raise ValueError("Unable to load request") + + x509_req = self._ffi.gc(x509_req, self._lib.X509_REQ_free) + return _CertificateSigningRequest(self, x509_req) + + def _load_key(self, openssl_read_func, convert_func, data, password): + mem_bio = self._bytes_to_bio(data) + + if password is not None and not isinstance(password, bytes): + raise TypeError("Password must be bytes") + + userdata = self._ffi.new("CRYPTOGRAPHY_PASSWORD_DATA *") + if password is not None: + password_buf = self._ffi.new("char []", password) + userdata.password = password_buf + userdata.length = len(password) + + evp_pkey = openssl_read_func( + mem_bio.bio, + self._ffi.NULL, + self._ffi.addressof( + self._lib._original_lib, "Cryptography_pem_password_cb" + ), + userdata, + ) + + if evp_pkey == self._ffi.NULL: + if userdata.error != 0: + errors = self._consume_errors() + self.openssl_assert(errors) + if userdata.error == -1: + raise TypeError( + "Password was not given but private key is encrypted" + ) + else: + assert userdata.error == -2 + raise ValueError( + "Passwords longer than {0} bytes are not supported " + "by this backend.".format(userdata.maxsize - 1) + ) + else: + self._handle_key_loading_error() + + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + + if password is not None and userdata.called == 0: + raise TypeError( + "Password was given but private key is not encrypted.") + + assert ( + (password is not None and userdata.called == 1) or + password is None + ) + + return convert_func(evp_pkey) + + def _handle_key_loading_error(self): + errors = self._consume_errors() + + if not errors: + raise ValueError("Could not deserialize key data.") + + elif ( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_EVP, self._lib.EVP_R_BAD_DECRYPT + ) or errors[0]._lib_reason_match( + self._lib.ERR_LIB_PKCS12, + self._lib.PKCS12_R_PKCS12_CIPHERFINAL_ERROR + ) + ): + raise ValueError("Bad decrypt. Incorrect password?") + + elif ( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_EVP, self._lib.EVP_R_UNKNOWN_PBE_ALGORITHM + ) or errors[0]._lib_reason_match( + self._lib.ERR_LIB_PEM, self._lib.PEM_R_UNSUPPORTED_ENCRYPTION + ) + ): + raise UnsupportedAlgorithm( + "PEM data is encrypted with an unsupported cipher", + _Reasons.UNSUPPORTED_CIPHER + ) + + elif any( + error._lib_reason_match( + self._lib.ERR_LIB_EVP, + self._lib.EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM + ) + for error in errors + ): + raise ValueError("Unsupported public key algorithm.") + + else: + assert errors[0].lib in ( + self._lib.ERR_LIB_EVP, + self._lib.ERR_LIB_PEM, + self._lib.ERR_LIB_ASN1, + ) + raise ValueError("Could not deserialize key data.") + + def elliptic_curve_supported(self, curve): + try: + curve_nid = self._elliptic_curve_to_nid(curve) + except UnsupportedAlgorithm: + curve_nid = self._lib.NID_undef + + group = self._lib.EC_GROUP_new_by_curve_name(curve_nid) + + if group == self._ffi.NULL: + errors = self._consume_errors() + self.openssl_assert( + curve_nid == self._lib.NID_undef or + errors[0]._lib_reason_match( + self._lib.ERR_LIB_EC, + self._lib.EC_R_UNKNOWN_GROUP + ) + ) + return False + else: + self.openssl_assert(curve_nid != self._lib.NID_undef) + self._lib.EC_GROUP_free(group) + return True + + def elliptic_curve_signature_algorithm_supported( + self, signature_algorithm, curve + ): + # We only support ECDSA right now. 
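+        # Illustrative example (editor's addition, not upstream source):
+        # on a typical OpenSSL build,
+        #   backend.elliptic_curve_signature_algorithm_supported(
+        #       ec.ECDSA(hashes.SHA256()), ec.SECP256R1())
+        # returns True, while any non-ECDSA signature algorithm object
+        # short-circuits to False below.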
+ if not isinstance(signature_algorithm, ec.ECDSA): + return False + + return self.elliptic_curve_supported(curve) + + def generate_elliptic_curve_private_key(self, curve): + """ + Generate a new private key on the named curve. + """ + + if self.elliptic_curve_supported(curve): + curve_nid = self._elliptic_curve_to_nid(curve) + + ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + + res = self._lib.EC_KEY_generate_key(ec_cdata) + self.openssl_assert(res == 1) + + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + + return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + else: + raise UnsupportedAlgorithm( + "Backend object does not support {0}.".format(curve.name), + _Reasons.UNSUPPORTED_ELLIPTIC_CURVE + ) + + def load_elliptic_curve_private_numbers(self, numbers): + public = numbers.public_numbers + + curve_nid = self._elliptic_curve_to_nid(public.curve) + + ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + + private_value = self._ffi.gc( + self._int_to_bn(numbers.private_value), self._lib.BN_clear_free + ) + res = self._lib.EC_KEY_set_private_key(ec_cdata, private_value) + self.openssl_assert(res == 1) + + ec_cdata = self._ec_key_set_public_key_affine_coordinates( + ec_cdata, public.x, public.y) + + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + + return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + + def load_elliptic_curve_public_numbers(self, numbers): + curve_nid = self._elliptic_curve_to_nid(numbers.curve) + + ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + + ec_cdata = self._ec_key_set_public_key_affine_coordinates( + ec_cdata, numbers.x, numbers.y) + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + + return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey) + + def derive_elliptic_curve_private_key(self, private_value, curve): + curve_nid = self._elliptic_curve_to_nid(curve) + + ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + + get_func, group = self._ec_key_determine_group_get_func(ec_cdata) + + point = self._lib.EC_POINT_new(group) + self.openssl_assert(point != self._ffi.NULL) + point = self._ffi.gc(point, self._lib.EC_POINT_free) + + value = self._int_to_bn(private_value) + value = self._ffi.gc(value, self._lib.BN_clear_free) + + with self._tmp_bn_ctx() as bn_ctx: + res = self._lib.EC_POINT_mul(group, point, value, self._ffi.NULL, + self._ffi.NULL, bn_ctx) + self.openssl_assert(res == 1) + + bn_x = self._lib.BN_CTX_get(bn_ctx) + bn_y = self._lib.BN_CTX_get(bn_ctx) + + res = get_func(group, point, bn_x, bn_y, bn_ctx) + self.openssl_assert(res == 1) + + res = self._lib.EC_KEY_set_public_key(ec_cdata, point) + self.openssl_assert(res == 1) + private = self._int_to_bn(private_value) + private = self._ffi.gc(private, self._lib.BN_clear_free) + res = self._lib.EC_KEY_set_private_key(ec_cdata, private) + self.openssl_assert(res == 1) + + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + + return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + + def elliptic_curve_exchange_algorithm_supported(self, algorithm, curve): + return ( + self.elliptic_curve_supported(curve) and + isinstance(algorithm, ec.ECDH) 
+ ) + + def _ec_cdata_to_evp_pkey(self, ec_cdata): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set1_EC_KEY(evp_pkey, ec_cdata) + self.openssl_assert(res == 1) + return evp_pkey + + def _elliptic_curve_to_nid(self, curve): + """ + Get the NID for a curve name. + """ + + curve_aliases = { + "secp192r1": "prime192v1", + "secp256r1": "prime256v1" + } + + curve_name = curve_aliases.get(curve.name, curve.name) + + curve_nid = self._lib.OBJ_sn2nid(curve_name.encode()) + if curve_nid == self._lib.NID_undef: + raise UnsupportedAlgorithm( + "{0} is not a supported elliptic curve".format(curve.name), + _Reasons.UNSUPPORTED_ELLIPTIC_CURVE + ) + return curve_nid + + @contextmanager + def _tmp_bn_ctx(self): + bn_ctx = self._lib.BN_CTX_new() + self.openssl_assert(bn_ctx != self._ffi.NULL) + bn_ctx = self._ffi.gc(bn_ctx, self._lib.BN_CTX_free) + self._lib.BN_CTX_start(bn_ctx) + try: + yield bn_ctx + finally: + self._lib.BN_CTX_end(bn_ctx) + + def _ec_key_determine_group_get_func(self, ctx): + """ + Given an EC_KEY determine the group and what function is required to + get point coordinates. + """ + self.openssl_assert(ctx != self._ffi.NULL) + + nid_two_field = self._lib.OBJ_sn2nid(b"characteristic-two-field") + self.openssl_assert(nid_two_field != self._lib.NID_undef) + + group = self._lib.EC_KEY_get0_group(ctx) + self.openssl_assert(group != self._ffi.NULL) + + method = self._lib.EC_GROUP_method_of(group) + self.openssl_assert(method != self._ffi.NULL) + + nid = self._lib.EC_METHOD_get_field_type(method) + self.openssl_assert(nid != self._lib.NID_undef) + + if nid == nid_two_field and self._lib.Cryptography_HAS_EC2M: + get_func = self._lib.EC_POINT_get_affine_coordinates_GF2m + else: + get_func = self._lib.EC_POINT_get_affine_coordinates_GFp + + assert get_func + + return get_func, group + + def _ec_key_set_public_key_affine_coordinates(self, ctx, x, y): + """ + Sets the public key point in the EC_KEY context to the affine x and y + values. + """ + + if x < 0 or y < 0: + raise ValueError( + "Invalid EC key. Both x and y must be non-negative." + ) + + x = self._ffi.gc(self._int_to_bn(x), self._lib.BN_free) + y = self._ffi.gc(self._int_to_bn(y), self._lib.BN_free) + res = self._lib.EC_KEY_set_public_key_affine_coordinates(ctx, x, y) + if res != 1: + self._consume_errors() + raise ValueError("Invalid EC key.") + + return ctx + + def _private_key_bytes(self, encoding, format, encryption_algorithm, + evp_pkey, cdata): + if not isinstance(format, serialization.PrivateFormat): + raise TypeError( + "format must be an item from the PrivateFormat enum" + ) + + if not isinstance(encryption_algorithm, + serialization.KeySerializationEncryption): + raise TypeError( + "Encryption algorithm must be a KeySerializationEncryption " + "instance" + ) + + if isinstance(encryption_algorithm, serialization.NoEncryption): + password = b"" + passlen = 0 + evp_cipher = self._ffi.NULL + elif isinstance(encryption_algorithm, + serialization.BestAvailableEncryption): + # This is a curated value that we will update over time. 
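+            # "Best available" currently resolves to AES-256-CBC; the
+            # PEM/PKCS8 writers invoked below derive the encryption key
+            # from the caller-supplied password.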
+ evp_cipher = self._lib.EVP_get_cipherbyname( + b"aes-256-cbc" + ) + password = encryption_algorithm.password + passlen = len(password) + if passlen > 1023: + raise ValueError( + "Passwords longer than 1023 bytes are not supported by " + "this backend" + ) + else: + raise ValueError("Unsupported encryption type") + + key_type = self._lib.EVP_PKEY_id(evp_pkey) + if encoding is serialization.Encoding.PEM: + if format is serialization.PrivateFormat.PKCS8: + write_bio = self._lib.PEM_write_bio_PKCS8PrivateKey + key = evp_pkey + else: + assert format is serialization.PrivateFormat.TraditionalOpenSSL + if key_type == self._lib.EVP_PKEY_RSA: + write_bio = self._lib.PEM_write_bio_RSAPrivateKey + elif key_type == self._lib.EVP_PKEY_DSA: + write_bio = self._lib.PEM_write_bio_DSAPrivateKey + else: + assert key_type == self._lib.EVP_PKEY_EC + write_bio = self._lib.PEM_write_bio_ECPrivateKey + + key = cdata + elif encoding is serialization.Encoding.DER: + if format is serialization.PrivateFormat.TraditionalOpenSSL: + if not isinstance( + encryption_algorithm, serialization.NoEncryption + ): + raise ValueError( + "Encryption is not supported for DER encoded " + "traditional OpenSSL keys" + ) + + return self._private_key_bytes_traditional_der(key_type, cdata) + else: + assert format is serialization.PrivateFormat.PKCS8 + write_bio = self._lib.i2d_PKCS8PrivateKey_bio + key = evp_pkey + else: + raise TypeError("encoding must be an item from the Encoding enum") + + bio = self._create_mem_bio_gc() + res = write_bio( + bio, + key, + evp_cipher, + password, + passlen, + self._ffi.NULL, + self._ffi.NULL + ) + self.openssl_assert(res == 1) + return self._read_mem_bio(bio) + + def _private_key_bytes_traditional_der(self, key_type, cdata): + if key_type == self._lib.EVP_PKEY_RSA: + write_bio = self._lib.i2d_RSAPrivateKey_bio + elif key_type == self._lib.EVP_PKEY_EC: + write_bio = self._lib.i2d_ECPrivateKey_bio + else: + self.openssl_assert(key_type == self._lib.EVP_PKEY_DSA) + write_bio = self._lib.i2d_DSAPrivateKey_bio + + bio = self._create_mem_bio_gc() + res = write_bio(bio, cdata) + self.openssl_assert(res == 1) + return self._read_mem_bio(bio) + + def _public_key_bytes(self, encoding, format, key, evp_pkey, cdata): + if not isinstance(encoding, serialization.Encoding): + raise TypeError("encoding must be an item from the Encoding enum") + + if ( + format is serialization.PublicFormat.OpenSSH or + encoding is serialization.Encoding.OpenSSH + ): + if ( + format is not serialization.PublicFormat.OpenSSH or + encoding is not serialization.Encoding.OpenSSH + ): + raise ValueError( + "OpenSSH format must be used with OpenSSH encoding" + ) + return self._openssh_public_key_bytes(key) + elif format is serialization.PublicFormat.SubjectPublicKeyInfo: + if encoding is serialization.Encoding.PEM: + write_bio = self._lib.PEM_write_bio_PUBKEY + else: + assert encoding is serialization.Encoding.DER + write_bio = self._lib.i2d_PUBKEY_bio + + key = evp_pkey + elif format is serialization.PublicFormat.PKCS1: + # Only RSA is supported here. 
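+            # PKCS1 is the bare RSAPublicKey structure, which only exists
+            # for RSA; DSA and EC keys must be serialized as
+            # SubjectPublicKeyInfo or OpenSSH instead.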
+ assert self._lib.EVP_PKEY_id(evp_pkey) == self._lib.EVP_PKEY_RSA + if encoding is serialization.Encoding.PEM: + write_bio = self._lib.PEM_write_bio_RSAPublicKey + else: + assert encoding is serialization.Encoding.DER + write_bio = self._lib.i2d_RSAPublicKey_bio + + key = cdata + else: + raise TypeError( + "format must be an item from the PublicFormat enum" + ) + + bio = self._create_mem_bio_gc() + res = write_bio(bio, key) + self.openssl_assert(res == 1) + return self._read_mem_bio(bio) + + def _openssh_public_key_bytes(self, key): + if isinstance(key, rsa.RSAPublicKey): + public_numbers = key.public_numbers() + return b"ssh-rsa " + base64.b64encode( + serialization._ssh_write_string(b"ssh-rsa") + + serialization._ssh_write_mpint(public_numbers.e) + + serialization._ssh_write_mpint(public_numbers.n) + ) + elif isinstance(key, dsa.DSAPublicKey): + public_numbers = key.public_numbers() + parameter_numbers = public_numbers.parameter_numbers + return b"ssh-dss " + base64.b64encode( + serialization._ssh_write_string(b"ssh-dss") + + serialization._ssh_write_mpint(parameter_numbers.p) + + serialization._ssh_write_mpint(parameter_numbers.q) + + serialization._ssh_write_mpint(parameter_numbers.g) + + serialization._ssh_write_mpint(public_numbers.y) + ) + else: + assert isinstance(key, ec.EllipticCurvePublicKey) + public_numbers = key.public_numbers() + try: + curve_name = { + ec.SECP256R1: b"nistp256", + ec.SECP384R1: b"nistp384", + ec.SECP521R1: b"nistp521", + }[type(public_numbers.curve)] + except KeyError: + raise ValueError( + "Only SECP256R1, SECP384R1, and SECP521R1 curves are " + "supported by the SSH public key format" + ) + return b"ecdsa-sha2-" + curve_name + b" " + base64.b64encode( + serialization._ssh_write_string(b"ecdsa-sha2-" + curve_name) + + serialization._ssh_write_string(curve_name) + + serialization._ssh_write_string(public_numbers.encode_point()) + ) + + def _parameter_bytes(self, encoding, format, cdata): + if encoding is serialization.Encoding.OpenSSH: + raise TypeError( + "OpenSSH encoding is not supported" + ) + + # Only DH is supported here currently. 
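+        # Read q back via DH_get0_pqg: when q is present these are X9.42
+        # (DHX) parameters and the DHxparams writers below must be used.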
+ q = self._ffi.new("BIGNUM **") + self._lib.DH_get0_pqg(cdata, + self._ffi.NULL, + q, + self._ffi.NULL) + if encoding is serialization.Encoding.PEM: + if q[0] != self._ffi.NULL: + write_bio = self._lib.PEM_write_bio_DHxparams + else: + write_bio = self._lib.PEM_write_bio_DHparams + elif encoding is serialization.Encoding.DER: + if q[0] != self._ffi.NULL: + write_bio = self._lib.Cryptography_i2d_DHxparams_bio + else: + write_bio = self._lib.i2d_DHparams_bio + else: + raise TypeError("encoding must be an item from the Encoding enum") + + bio = self._create_mem_bio_gc() + res = write_bio(bio, cdata) + self.openssl_assert(res == 1) + return self._read_mem_bio(bio) + + def generate_dh_parameters(self, generator, key_size): + if key_size < 512: + raise ValueError("DH key_size must be at least 512 bits") + + if generator not in (2, 5): + raise ValueError("DH generator must be 2 or 5") + + dh_param_cdata = self._lib.DH_new() + self.openssl_assert(dh_param_cdata != self._ffi.NULL) + dh_param_cdata = self._ffi.gc(dh_param_cdata, self._lib.DH_free) + + res = self._lib.DH_generate_parameters_ex( + dh_param_cdata, + key_size, + generator, + self._ffi.NULL + ) + self.openssl_assert(res == 1) + + return _DHParameters(self, dh_param_cdata) + + def _dh_cdata_to_evp_pkey(self, dh_cdata): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set1_DH(evp_pkey, dh_cdata) + self.openssl_assert(res == 1) + return evp_pkey + + def generate_dh_private_key(self, parameters): + dh_key_cdata = _dh_params_dup(parameters._dh_cdata, self) + + res = self._lib.DH_generate_key(dh_key_cdata) + self.openssl_assert(res == 1) + + evp_pkey = self._dh_cdata_to_evp_pkey(dh_key_cdata) + + return _DHPrivateKey(self, dh_key_cdata, evp_pkey) + + def generate_dh_private_key_and_parameters(self, generator, key_size): + return self.generate_dh_private_key( + self.generate_dh_parameters(generator, key_size)) + + def load_dh_private_numbers(self, numbers): + parameter_numbers = numbers.public_numbers.parameter_numbers + + dh_cdata = self._lib.DH_new() + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + + p = self._int_to_bn(parameter_numbers.p) + g = self._int_to_bn(parameter_numbers.g) + + if parameter_numbers.q is not None: + q = self._int_to_bn(parameter_numbers.q) + else: + q = self._ffi.NULL + + pub_key = self._int_to_bn(numbers.public_numbers.y) + priv_key = self._int_to_bn(numbers.x) + + res = self._lib.DH_set0_pqg(dh_cdata, p, q, g) + self.openssl_assert(res == 1) + + res = self._lib.DH_set0_key(dh_cdata, pub_key, priv_key) + self.openssl_assert(res == 1) + + codes = self._ffi.new("int[]", 1) + res = self._lib.Cryptography_DH_check(dh_cdata, codes) + self.openssl_assert(res == 1) + + # DH_check will return DH_NOT_SUITABLE_GENERATOR if p % 24 does not + # equal 11 when the generator is 2 (a quadratic nonresidue). + # We want to ignore that error because p % 24 == 23 is also fine. + # Specifically, g is then a quadratic residue. Within the context of + # Diffie-Hellman this means it can only generate half the possible + # values. That sounds bad, but quadratic nonresidues leak a bit of + # the key to the attacker in exchange for having the full key space + # available. See: https://crypto.stackexchange.com/questions/12961 + if codes[0] != 0 and not ( + parameter_numbers.g == 2 and + codes[0] ^ self._lib.DH_NOT_SUITABLE_GENERATOR == 0 + ): + raise ValueError( + "DH private numbers did not pass safety checks." 
+ ) + + evp_pkey = self._dh_cdata_to_evp_pkey(dh_cdata) + + return _DHPrivateKey(self, dh_cdata, evp_pkey) + + def load_dh_public_numbers(self, numbers): + dh_cdata = self._lib.DH_new() + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + + parameter_numbers = numbers.parameter_numbers + + p = self._int_to_bn(parameter_numbers.p) + g = self._int_to_bn(parameter_numbers.g) + + if parameter_numbers.q is not None: + q = self._int_to_bn(parameter_numbers.q) + else: + q = self._ffi.NULL + + pub_key = self._int_to_bn(numbers.y) + + res = self._lib.DH_set0_pqg(dh_cdata, p, q, g) + self.openssl_assert(res == 1) + + res = self._lib.DH_set0_key(dh_cdata, pub_key, self._ffi.NULL) + self.openssl_assert(res == 1) + + evp_pkey = self._dh_cdata_to_evp_pkey(dh_cdata) + + return _DHPublicKey(self, dh_cdata, evp_pkey) + + def load_dh_parameter_numbers(self, numbers): + dh_cdata = self._lib.DH_new() + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + + p = self._int_to_bn(numbers.p) + g = self._int_to_bn(numbers.g) + + if numbers.q is not None: + q = self._int_to_bn(numbers.q) + else: + q = self._ffi.NULL + + res = self._lib.DH_set0_pqg(dh_cdata, p, q, g) + self.openssl_assert(res == 1) + + return _DHParameters(self, dh_cdata) + + def dh_parameters_supported(self, p, g, q=None): + dh_cdata = self._lib.DH_new() + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + + p = self._int_to_bn(p) + g = self._int_to_bn(g) + + if q is not None: + q = self._int_to_bn(q) + else: + q = self._ffi.NULL + + res = self._lib.DH_set0_pqg(dh_cdata, p, q, g) + self.openssl_assert(res == 1) + + codes = self._ffi.new("int[]", 1) + res = self._lib.Cryptography_DH_check(dh_cdata, codes) + self.openssl_assert(res == 1) + + return codes[0] == 0 + + def dh_x942_serialization_supported(self): + return self._lib.Cryptography_HAS_EVP_PKEY_DHX == 1 + + def x509_name_bytes(self, name): + x509_name = _encode_name_gc(self, name) + pp = self._ffi.new("unsigned char **") + res = self._lib.i2d_X509_NAME(x509_name, pp) + self.openssl_assert(pp[0] != self._ffi.NULL) + pp = self._ffi.gc( + pp, lambda pointer: self._lib.OPENSSL_free(pointer[0]) + ) + self.openssl_assert(res > 0) + return self._ffi.buffer(pp[0], res)[:] + + def x25519_load_public_bytes(self, data): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set_type(evp_pkey, self._lib.NID_X25519) + backend.openssl_assert(res == 1) + res = self._lib.EVP_PKEY_set1_tls_encodedpoint( + evp_pkey, data, len(data) + ) + backend.openssl_assert(res == 1) + return _X25519PublicKey(self, evp_pkey) + + def x25519_load_private_bytes(self, data): + # OpenSSL only has facilities for loading PKCS8 formatted private + # keys using the algorithm identifiers specified in + # https://tools.ietf.org/html/draft-ietf-curdle-pkix-09. + # This is the standard PKCS8 prefix for a 32 byte X25519 key. + # The form is: + # 0:d=0 hl=2 l= 46 cons: SEQUENCE + # 2:d=1 hl=2 l= 1 prim: INTEGER :00 + # 5:d=1 hl=2 l= 5 cons: SEQUENCE + # 7:d=2 hl=2 l= 3 prim: OBJECT :1.3.101.110 + # 12:d=1 hl=2 l= 34 prim: OCTET STRING (the key) + # Of course there's a bit more complexity. In reality OCTET STRING + # contains an OCTET STRING of length 32! So the last two bytes here + # are \x04\x20, which is an OCTET STRING of length 32. 
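+        # Worked example (editor's illustration): a raw 32-byte key of all
+        # zeros becomes the 48-byte DER blob pkcs8_prefix + b"\x00" * 32,
+        # which d2i_PrivateKey_bio below parses into an X25519 EVP_PKEY.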
+ pkcs8_prefix = b'0.\x02\x01\x000\x05\x06\x03+en\x04"\x04 ' + bio = self._bytes_to_bio(pkcs8_prefix + data) + evp_pkey = backend._lib.d2i_PrivateKey_bio(bio.bio, self._ffi.NULL) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + self.openssl_assert( + self._lib.EVP_PKEY_id(evp_pkey) == self._lib.EVP_PKEY_X25519 + ) + return _X25519PrivateKey(self, evp_pkey) + + def x25519_generate_key(self): + evp_pkey_ctx = self._lib.EVP_PKEY_CTX_new_id( + self._lib.NID_X25519, self._ffi.NULL + ) + self.openssl_assert(evp_pkey_ctx != self._ffi.NULL) + evp_pkey_ctx = self._ffi.gc( + evp_pkey_ctx, self._lib.EVP_PKEY_CTX_free + ) + res = self._lib.EVP_PKEY_keygen_init(evp_pkey_ctx) + self.openssl_assert(res == 1) + evp_ppkey = self._ffi.new("EVP_PKEY **") + res = self._lib.EVP_PKEY_keygen(evp_pkey_ctx, evp_ppkey) + self.openssl_assert(res == 1) + self.openssl_assert(evp_ppkey[0] != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_ppkey[0], self._lib.EVP_PKEY_free) + return _X25519PrivateKey(self, evp_pkey) + + def x25519_supported(self): + return self._lib.CRYPTOGRAPHY_OPENSSL_110_OR_GREATER + + def derive_scrypt(self, key_material, salt, length, n, r, p): + buf = self._ffi.new("unsigned char[]", length) + res = self._lib.EVP_PBE_scrypt( + key_material, len(key_material), salt, len(salt), n, r, p, + scrypt._MEM_LIMIT, buf, length + ) + self.openssl_assert(res == 1) + return self._ffi.buffer(buf)[:] + + def aead_cipher_supported(self, cipher): + cipher_name = aead._aead_cipher_name(cipher) + return ( + self._lib.EVP_get_cipherbyname(cipher_name) != self._ffi.NULL + ) + + +class GetCipherByName(object): + def __init__(self, fmt): + self._fmt = fmt + + def __call__(self, backend, cipher, mode): + cipher_name = self._fmt.format(cipher=cipher, mode=mode).lower() + return backend._lib.EVP_get_cipherbyname(cipher_name.encode("ascii")) + + +def _get_xts_cipher(backend, cipher, mode): + cipher_name = "aes-{0}-xts".format(cipher.key_size // 2) + return backend._lib.EVP_get_cipherbyname(cipher_name.encode("ascii")) + + +backend = Backend() diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/ciphers.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/ciphers.py new file mode 100644 index 0000000..e0ee06e --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/ciphers.py @@ -0,0 +1,222 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.exceptions import InvalidTag, UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives import ciphers +from cryptography.hazmat.primitives.ciphers import modes + + +@utils.register_interface(ciphers.CipherContext) +@utils.register_interface(ciphers.AEADCipherContext) +@utils.register_interface(ciphers.AEADEncryptionContext) +@utils.register_interface(ciphers.AEADDecryptionContext) +class _CipherContext(object): + _ENCRYPT = 1 + _DECRYPT = 0 + + def __init__(self, backend, cipher, mode, operation): + self._backend = backend + self._cipher = cipher + self._mode = mode + self._operation = operation + self._tag = None + + if isinstance(self._cipher, ciphers.BlockCipherAlgorithm): + self._block_size_bytes = self._cipher.block_size // 8 + else: + self._block_size_bytes = 1 + + ctx = self._backend._lib.EVP_CIPHER_CTX_new() + ctx = self._backend._ffi.gc( + ctx, self._backend._lib.EVP_CIPHER_CTX_free + ) + + registry = self._backend._cipher_registry + try: + adapter = registry[type(cipher), type(mode)] + except KeyError: + raise UnsupportedAlgorithm( + "cipher {0} in {1} mode is not supported " + "by this backend.".format( + cipher.name, mode.name if mode else mode), + _Reasons.UNSUPPORTED_CIPHER + ) + + evp_cipher = adapter(self._backend, cipher, mode) + if evp_cipher == self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "cipher {0} in {1} mode is not supported " + "by this backend.".format( + cipher.name, mode.name if mode else mode), + _Reasons.UNSUPPORTED_CIPHER + ) + + if isinstance(mode, modes.ModeWithInitializationVector): + iv_nonce = mode.initialization_vector + elif isinstance(mode, modes.ModeWithTweak): + iv_nonce = mode.tweak + elif isinstance(mode, modes.ModeWithNonce): + iv_nonce = mode.nonce + elif isinstance(cipher, modes.ModeWithNonce): + iv_nonce = cipher.nonce + else: + iv_nonce = self._backend._ffi.NULL + # begin init with cipher and operation type + res = self._backend._lib.EVP_CipherInit_ex(ctx, evp_cipher, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + operation) + self._backend.openssl_assert(res != 0) + # set the key length to handle variable key ciphers + res = self._backend._lib.EVP_CIPHER_CTX_set_key_length( + ctx, len(cipher.key) + ) + self._backend.openssl_assert(res != 0) + if isinstance(mode, modes.GCM): + res = self._backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, self._backend._lib.EVP_CTRL_AEAD_SET_IVLEN, + len(iv_nonce), self._backend._ffi.NULL + ) + self._backend.openssl_assert(res != 0) + if mode.tag is not None: + res = self._backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, self._backend._lib.EVP_CTRL_AEAD_SET_TAG, + len(mode.tag), mode.tag + ) + self._backend.openssl_assert(res != 0) + self._tag = mode.tag + elif ( + self._operation == self._DECRYPT and + self._backend._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and + not self._backend._lib.CRYPTOGRAPHY_IS_LIBRESSL + ): + raise NotImplementedError( + "delayed passing of GCM tag requires OpenSSL >= 1.0.2." + " To use this feature please update OpenSSL" + ) + + # pass key/iv + res = self._backend._lib.EVP_CipherInit_ex( + ctx, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + cipher.key, + iv_nonce, + operation + ) + self._backend.openssl_assert(res != 0) + # We purposely disable padding here as it's handled higher up in the + # API. 
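+        # (Callers pad via cryptography.hazmat.primitives.padding, e.g. the
+        # PKCS7 padder, before data reaches this context.)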
+ self._backend._lib.EVP_CIPHER_CTX_set_padding(ctx, 0) + self._ctx = ctx + + def update(self, data): + buf = bytearray(len(data) + self._block_size_bytes - 1) + n = self.update_into(data, buf) + return bytes(buf[:n]) + + def update_into(self, data, buf): + if len(buf) < (len(data) + self._block_size_bytes - 1): + raise ValueError( + "buffer must be at least {0} bytes for this " + "payload".format(len(data) + self._block_size_bytes - 1) + ) + + buf = self._backend._ffi.cast( + "unsigned char *", self._backend._ffi.from_buffer(buf) + ) + outlen = self._backend._ffi.new("int *") + res = self._backend._lib.EVP_CipherUpdate(self._ctx, buf, outlen, + data, len(data)) + self._backend.openssl_assert(res != 0) + return outlen[0] + + def finalize(self): + # OpenSSL 1.0.1 on Ubuntu 12.04 (and possibly other distributions) + # appears to have a bug where you must make at least one call to update + # even if you are only using authenticate_additional_data or the + # GCM tag will be wrong. An (empty) call to update resolves this + # and is harmless for all other versions of OpenSSL. + if isinstance(self._mode, modes.GCM): + self.update(b"") + + if ( + self._operation == self._DECRYPT and + isinstance(self._mode, modes.ModeWithAuthenticationTag) and + self.tag is None + ): + raise ValueError( + "Authentication tag must be provided when decrypting." + ) + + buf = self._backend._ffi.new("unsigned char[]", self._block_size_bytes) + outlen = self._backend._ffi.new("int *") + res = self._backend._lib.EVP_CipherFinal_ex(self._ctx, buf, outlen) + if res == 0: + errors = self._backend._consume_errors() + + if not errors and isinstance(self._mode, modes.GCM): + raise InvalidTag + + self._backend.openssl_assert( + errors[0]._lib_reason_match( + self._backend._lib.ERR_LIB_EVP, + self._backend._lib.EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH + ) + ) + raise ValueError( + "The length of the provided data is not a multiple of " + "the block length." + ) + + if (isinstance(self._mode, modes.GCM) and + self._operation == self._ENCRYPT): + tag_buf = self._backend._ffi.new( + "unsigned char[]", self._block_size_bytes + ) + res = self._backend._lib.EVP_CIPHER_CTX_ctrl( + self._ctx, self._backend._lib.EVP_CTRL_AEAD_GET_TAG, + self._block_size_bytes, tag_buf + ) + self._backend.openssl_assert(res != 0) + self._tag = self._backend._ffi.buffer(tag_buf)[:] + + res = self._backend._lib.EVP_CIPHER_CTX_cleanup(self._ctx) + self._backend.openssl_assert(res == 1) + return self._backend._ffi.buffer(buf)[:outlen[0]] + + def finalize_with_tag(self, tag): + if ( + self._backend._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and + not self._backend._lib.CRYPTOGRAPHY_IS_LIBRESSL + ): + raise NotImplementedError( + "finalize_with_tag requires OpenSSL >= 1.0.2. 
To use this " + "method please update OpenSSL" + ) + if len(tag) < self._mode._min_tag_length: + raise ValueError( + "Authentication tag must be {0} bytes or longer.".format( + self._mode._min_tag_length) + ) + res = self._backend._lib.EVP_CIPHER_CTX_ctrl( + self._ctx, self._backend._lib.EVP_CTRL_AEAD_SET_TAG, + len(tag), tag + ) + self._backend.openssl_assert(res != 0) + self._tag = tag + return self.finalize() + + def authenticate_additional_data(self, data): + outlen = self._backend._ffi.new("int *") + res = self._backend._lib.EVP_CipherUpdate( + self._ctx, self._backend._ffi.NULL, outlen, data, len(data) + ) + self._backend.openssl_assert(res != 0) + + tag = utils.read_only_property("_tag") diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/cmac.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/cmac.py new file mode 100644 index 0000000..e20f66d --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/cmac.py @@ -0,0 +1,81 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + + +from cryptography import utils +from cryptography.exceptions import ( + InvalidSignature, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.primitives import constant_time, mac +from cryptography.hazmat.primitives.ciphers.modes import CBC + + +@utils.register_interface(mac.MACContext) +class _CMACContext(object): + def __init__(self, backend, algorithm, ctx=None): + if not backend.cmac_algorithm_supported(algorithm): + raise UnsupportedAlgorithm("This backend does not support CMAC.", + _Reasons.UNSUPPORTED_CIPHER) + + self._backend = backend + self._key = algorithm.key + self._algorithm = algorithm + self._output_length = algorithm.block_size // 8 + + if ctx is None: + registry = self._backend._cipher_registry + adapter = registry[type(algorithm), CBC] + + evp_cipher = adapter(self._backend, algorithm, CBC) + + ctx = self._backend._lib.CMAC_CTX_new() + + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + ctx = self._backend._ffi.gc(ctx, self._backend._lib.CMAC_CTX_free) + + res = self._backend._lib.CMAC_Init( + ctx, self._key, len(self._key), + evp_cipher, self._backend._ffi.NULL + ) + self._backend.openssl_assert(res == 1) + + self._ctx = ctx + + algorithm = utils.read_only_property("_algorithm") + + def update(self, data): + res = self._backend._lib.CMAC_Update(self._ctx, data, len(data)) + self._backend.openssl_assert(res == 1) + + def finalize(self): + buf = self._backend._ffi.new("unsigned char[]", self._output_length) + length = self._backend._ffi.new("size_t *", self._output_length) + res = self._backend._lib.CMAC_Final( + self._ctx, buf, length + ) + self._backend.openssl_assert(res == 1) + + self._ctx = None + + return self._backend._ffi.buffer(buf)[:] + + def copy(self): + copied_ctx = self._backend._lib.CMAC_CTX_new() + copied_ctx = self._backend._ffi.gc( + copied_ctx, self._backend._lib.CMAC_CTX_free + ) + res = self._backend._lib.CMAC_CTX_copy( + copied_ctx, self._ctx + ) + self._backend.openssl_assert(res == 1) + return _CMACContext( + self._backend, self._algorithm, ctx=copied_ctx + ) + + def verify(self, signature): + digest = self.finalize() + if not constant_time.bytes_eq(digest, signature): + raise InvalidSignature("Signature did not match digest.") diff --git 
a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/decode_asn1.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/decode_asn1.py
new file mode 100644
index 0000000..31fb8cf
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/decode_asn1.py
@@ -0,0 +1,826 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import datetime
+import ipaddress
+
+from asn1crypto.core import Integer, SequenceOf
+
+from cryptography import x509
+from cryptography.x509.extensions import _TLS_FEATURE_TYPE_TO_ENUM
+from cryptography.x509.name import _ASN1_TYPE_TO_ENUM
+from cryptography.x509.oid import (
+    CRLEntryExtensionOID, CertificatePoliciesOID, ExtensionOID
+)
+
+
+class _Integers(SequenceOf):
+    _child_spec = Integer
+
+
+def _obj2txt(backend, obj):
+    # Set to 80 on the recommendation of
+    # https://www.openssl.org/docs/crypto/OBJ_nid2ln.html#return_values
+    #
+    # But OIDs longer than this occur in real life (e.g. Active
+    # Directory makes some very long OIDs). So we need to detect
+    # and properly handle the case where the default buffer is not
+    # big enough.
+    #
+    buf_len = 80
+    buf = backend._ffi.new("char[]", buf_len)
+
+    # 'res' is the number of bytes that *would* be written if the
+    # buffer is large enough. If 'res' > buf_len - 1, we need to
+    # alloc a big-enough buffer and go again.
+    res = backend._lib.OBJ_obj2txt(buf, buf_len, obj, 1)
+    if res > buf_len - 1:  # account for terminating null byte
+        buf_len = res + 1
+        buf = backend._ffi.new("char[]", buf_len)
+        res = backend._lib.OBJ_obj2txt(buf, buf_len, obj, 1)
+    backend.openssl_assert(res > 0)
+    return backend._ffi.buffer(buf, res)[:].decode()
+
+
+def _decode_x509_name_entry(backend, x509_name_entry):
+    obj = backend._lib.X509_NAME_ENTRY_get_object(x509_name_entry)
+    backend.openssl_assert(obj != backend._ffi.NULL)
+    data = backend._lib.X509_NAME_ENTRY_get_data(x509_name_entry)
+    backend.openssl_assert(data != backend._ffi.NULL)
+    value = _asn1_string_to_utf8(backend, data)
+    oid = _obj2txt(backend, obj)
+    type = _ASN1_TYPE_TO_ENUM[data.type]
+
+    return x509.NameAttribute(x509.ObjectIdentifier(oid), value, type)
+
+
+def _decode_x509_name(backend, x509_name):
+    count = backend._lib.X509_NAME_entry_count(x509_name)
+    attributes = []
+    prev_set_id = -1
+    for x in range(count):
+        entry = backend._lib.X509_NAME_get_entry(x509_name, x)
+        attribute = _decode_x509_name_entry(backend, entry)
+        set_id = backend._lib.Cryptography_X509_NAME_ENTRY_set(entry)
+        if set_id != prev_set_id:
+            attributes.append(set([attribute]))
+        else:
+            # it is in the same RDN as the previous entry
+            attributes[-1].add(attribute)
+        prev_set_id = set_id
+
+    return x509.Name(x509.RelativeDistinguishedName(rdn) for rdn in attributes)
+
+
+def _decode_general_names(backend, gns):
+    num = backend._lib.sk_GENERAL_NAME_num(gns)
+    names = []
+    for i in range(num):
+        gn = backend._lib.sk_GENERAL_NAME_value(gns, i)
+        backend.openssl_assert(gn != backend._ffi.NULL)
+        names.append(_decode_general_name(backend, gn))
+
+    return names
+
+
+def _decode_general_name(backend, gn):
+    if gn.type == backend._lib.GEN_DNS:
+        # Convert to bytes and then decode to utf8. We don't use
+        # asn1_string_to_utf8 here because it doesn't properly convert
+        # utf8 from ia5strings.
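+        # (ASN1_STRING_to_UTF8 would mangle the non-ASCII bytes some
+        # real-world certificates place in IA5Strings, so the raw bytes
+        # are decoded as UTF-8 here instead.)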
+ data = _asn1_string_to_bytes(backend, gn.d.dNSName).decode("utf8") + # We don't use the constructor for DNSName so we can bypass validation + # This allows us to create DNSName objects that have unicode chars + # when a certificate (against the RFC) contains them. + return x509.DNSName._init_without_validation(data) + elif gn.type == backend._lib.GEN_URI: + # Convert to bytes and then decode to utf8. We don't use + # asn1_string_to_utf8 here because it doesn't properly convert + # utf8 from ia5strings. + data = _asn1_string_to_bytes( + backend, gn.d.uniformResourceIdentifier + ).decode("utf8") + # We don't use the constructor for URI so we can bypass validation + # This allows us to create URI objects that have unicode chars + # when a certificate (against the RFC) contains them. + return x509.UniformResourceIdentifier._init_without_validation(data) + elif gn.type == backend._lib.GEN_RID: + oid = _obj2txt(backend, gn.d.registeredID) + return x509.RegisteredID(x509.ObjectIdentifier(oid)) + elif gn.type == backend._lib.GEN_IPADD: + data = _asn1_string_to_bytes(backend, gn.d.iPAddress) + data_len = len(data) + if data_len == 8 or data_len == 32: + # This is an IPv4 or IPv6 Network and not a single IP. This + # type of data appears in Name Constraints. Unfortunately, + # ipaddress doesn't support packed bytes + netmask. Additionally, + # IPv6Network can only handle CIDR rather than the full 16 byte + # netmask. To handle this we convert the netmask to integer, then + # find the first 0 bit, which will be the prefix. If another 1 + # bit is present after that the netmask is invalid. + base = ipaddress.ip_address(data[:data_len // 2]) + netmask = ipaddress.ip_address(data[data_len // 2:]) + bits = bin(int(netmask))[2:] + prefix = bits.find('0') + # If no 0 bits are found it is a /32 or /128 + if prefix == -1: + prefix = len(bits) + + if "1" in bits[prefix:]: + raise ValueError("Invalid netmask") + + ip = ipaddress.ip_network(base.exploded + u"/{0}".format(prefix)) + else: + ip = ipaddress.ip_address(data) + + return x509.IPAddress(ip) + elif gn.type == backend._lib.GEN_DIRNAME: + return x509.DirectoryName( + _decode_x509_name(backend, gn.d.directoryName) + ) + elif gn.type == backend._lib.GEN_EMAIL: + # Convert to bytes and then decode to utf8. We don't use + # asn1_string_to_utf8 here because it doesn't properly convert + # utf8 from ia5strings. + data = _asn1_string_to_bytes(backend, gn.d.rfc822Name).decode("utf8") + # We don't use the constructor for RFC822Name so we can bypass + # validation. This allows us to create RFC822Name objects that have + # unicode chars when a certificate (against the RFC) contains them. 
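+        # The RFC822Name constructor would reject such addresses, but they
+        # do occur in certificates found in the wild.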
+ return x509.RFC822Name._init_without_validation(data) + elif gn.type == backend._lib.GEN_OTHERNAME: + type_id = _obj2txt(backend, gn.d.otherName.type_id) + value = _asn1_to_der(backend, gn.d.otherName.value) + return x509.OtherName(x509.ObjectIdentifier(type_id), value) + else: + # x400Address or ediPartyName + raise x509.UnsupportedGeneralNameType( + "{0} is not a supported type".format( + x509._GENERAL_NAMES.get(gn.type, gn.type) + ), + gn.type + ) + + +def _decode_ocsp_no_check(backend, ext): + return x509.OCSPNoCheck() + + +def _decode_crl_number(backend, ext): + asn1_int = backend._ffi.cast("ASN1_INTEGER *", ext) + asn1_int = backend._ffi.gc(asn1_int, backend._lib.ASN1_INTEGER_free) + return x509.CRLNumber(_asn1_integer_to_int(backend, asn1_int)) + + +def _decode_delta_crl_indicator(backend, ext): + asn1_int = backend._ffi.cast("ASN1_INTEGER *", ext) + asn1_int = backend._ffi.gc(asn1_int, backend._lib.ASN1_INTEGER_free) + return x509.DeltaCRLIndicator(_asn1_integer_to_int(backend, asn1_int)) + + +class _X509ExtensionParser(object): + def __init__(self, ext_count, get_ext, handlers): + self.ext_count = ext_count + self.get_ext = get_ext + self.handlers = handlers + + def parse(self, backend, x509_obj): + extensions = [] + seen_oids = set() + for i in range(self.ext_count(backend, x509_obj)): + ext = self.get_ext(backend, x509_obj, i) + backend.openssl_assert(ext != backend._ffi.NULL) + crit = backend._lib.X509_EXTENSION_get_critical(ext) + critical = crit == 1 + oid = x509.ObjectIdentifier( + _obj2txt(backend, backend._lib.X509_EXTENSION_get_object(ext)) + ) + if oid in seen_oids: + raise x509.DuplicateExtension( + "Duplicate {0} extension found".format(oid), oid + ) + + # This OID is only supported in OpenSSL 1.1.0+ but we want + # to support it in all versions of OpenSSL so we decode it + # ourselves. 
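+            # TLS Feature (RFC 7633) is a SEQUENCE OF INTEGER. As an
+            # illustrative example (editor's addition), the DER bytes
+            # 30 03 02 01 05 decode via _Integers.load() to [5], i.e.
+            # TLSFeatureType.status_request.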
+ if oid == ExtensionOID.TLS_FEATURE: + data = backend._lib.X509_EXTENSION_get_data(ext) + parsed = _Integers.load(_asn1_string_to_bytes(backend, data)) + value = x509.TLSFeature( + [_TLS_FEATURE_TYPE_TO_ENUM[x.native] for x in parsed] + ) + extensions.append(x509.Extension(oid, critical, value)) + seen_oids.add(oid) + continue + + try: + handler = self.handlers[oid] + except KeyError: + # Dump the DER payload into an UnrecognizedExtension object + data = backend._lib.X509_EXTENSION_get_data(ext) + backend.openssl_assert(data != backend._ffi.NULL) + der = backend._ffi.buffer(data.data, data.length)[:] + unrecognized = x509.UnrecognizedExtension(oid, der) + extensions.append( + x509.Extension(oid, critical, unrecognized) + ) + else: + ext_data = backend._lib.X509V3_EXT_d2i(ext) + if ext_data == backend._ffi.NULL: + backend._consume_errors() + raise ValueError( + "The {0} extension is invalid and can't be " + "parsed".format(oid) + ) + + value = handler(backend, ext_data) + extensions.append(x509.Extension(oid, critical, value)) + + seen_oids.add(oid) + + return x509.Extensions(extensions) + + +def _decode_certificate_policies(backend, cp): + cp = backend._ffi.cast("Cryptography_STACK_OF_POLICYINFO *", cp) + cp = backend._ffi.gc(cp, backend._lib.CERTIFICATEPOLICIES_free) + + num = backend._lib.sk_POLICYINFO_num(cp) + certificate_policies = [] + for i in range(num): + qualifiers = None + pi = backend._lib.sk_POLICYINFO_value(cp, i) + oid = x509.ObjectIdentifier(_obj2txt(backend, pi.policyid)) + if pi.qualifiers != backend._ffi.NULL: + qnum = backend._lib.sk_POLICYQUALINFO_num(pi.qualifiers) + qualifiers = [] + for j in range(qnum): + pqi = backend._lib.sk_POLICYQUALINFO_value( + pi.qualifiers, j + ) + pqualid = x509.ObjectIdentifier( + _obj2txt(backend, pqi.pqualid) + ) + if pqualid == CertificatePoliciesOID.CPS_QUALIFIER: + cpsuri = backend._ffi.buffer( + pqi.d.cpsuri.data, pqi.d.cpsuri.length + )[:].decode('ascii') + qualifiers.append(cpsuri) + else: + assert pqualid == CertificatePoliciesOID.CPS_USER_NOTICE + user_notice = _decode_user_notice( + backend, pqi.d.usernotice + ) + qualifiers.append(user_notice) + + certificate_policies.append( + x509.PolicyInformation(oid, qualifiers) + ) + + return x509.CertificatePolicies(certificate_policies) + + +def _decode_user_notice(backend, un): + explicit_text = None + notice_reference = None + + if un.exptext != backend._ffi.NULL: + explicit_text = _asn1_string_to_utf8(backend, un.exptext) + + if un.noticeref != backend._ffi.NULL: + organization = _asn1_string_to_utf8( + backend, un.noticeref.organization + ) + + num = backend._lib.sk_ASN1_INTEGER_num( + un.noticeref.noticenos + ) + notice_numbers = [] + for i in range(num): + asn1_int = backend._lib.sk_ASN1_INTEGER_value( + un.noticeref.noticenos, i + ) + notice_num = _asn1_integer_to_int(backend, asn1_int) + notice_numbers.append(notice_num) + + notice_reference = x509.NoticeReference( + organization, notice_numbers + ) + + return x509.UserNotice(notice_reference, explicit_text) + + +def _decode_basic_constraints(backend, bc_st): + basic_constraints = backend._ffi.cast("BASIC_CONSTRAINTS *", bc_st) + basic_constraints = backend._ffi.gc( + basic_constraints, backend._lib.BASIC_CONSTRAINTS_free + ) + # The byte representation of an ASN.1 boolean true is \xff. OpenSSL + # chooses to just map this to its ordinal value, so true is 255 and + # false is 0. 
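+    # So basic_constraints.ca is 255 for CA:TRUE and 0 for CA:FALSE;
+    # comparing against 255 yields the Python bool used below.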
+ ca = basic_constraints.ca == 255 + path_length = _asn1_integer_to_int_or_none( + backend, basic_constraints.pathlen + ) + + return x509.BasicConstraints(ca, path_length) + + +def _decode_subject_key_identifier(backend, asn1_string): + asn1_string = backend._ffi.cast("ASN1_OCTET_STRING *", asn1_string) + asn1_string = backend._ffi.gc( + asn1_string, backend._lib.ASN1_OCTET_STRING_free + ) + return x509.SubjectKeyIdentifier( + backend._ffi.buffer(asn1_string.data, asn1_string.length)[:] + ) + + +def _decode_authority_key_identifier(backend, akid): + akid = backend._ffi.cast("AUTHORITY_KEYID *", akid) + akid = backend._ffi.gc(akid, backend._lib.AUTHORITY_KEYID_free) + key_identifier = None + authority_cert_issuer = None + + if akid.keyid != backend._ffi.NULL: + key_identifier = backend._ffi.buffer( + akid.keyid.data, akid.keyid.length + )[:] + + if akid.issuer != backend._ffi.NULL: + authority_cert_issuer = _decode_general_names( + backend, akid.issuer + ) + + authority_cert_serial_number = _asn1_integer_to_int_or_none( + backend, akid.serial + ) + + return x509.AuthorityKeyIdentifier( + key_identifier, authority_cert_issuer, authority_cert_serial_number + ) + + +def _decode_authority_information_access(backend, aia): + aia = backend._ffi.cast("Cryptography_STACK_OF_ACCESS_DESCRIPTION *", aia) + aia = backend._ffi.gc(aia, backend._lib.sk_ACCESS_DESCRIPTION_free) + num = backend._lib.sk_ACCESS_DESCRIPTION_num(aia) + access_descriptions = [] + for i in range(num): + ad = backend._lib.sk_ACCESS_DESCRIPTION_value(aia, i) + backend.openssl_assert(ad.method != backend._ffi.NULL) + oid = x509.ObjectIdentifier(_obj2txt(backend, ad.method)) + backend.openssl_assert(ad.location != backend._ffi.NULL) + gn = _decode_general_name(backend, ad.location) + access_descriptions.append(x509.AccessDescription(oid, gn)) + + return x509.AuthorityInformationAccess(access_descriptions) + + +def _decode_key_usage(backend, bit_string): + bit_string = backend._ffi.cast("ASN1_BIT_STRING *", bit_string) + bit_string = backend._ffi.gc(bit_string, backend._lib.ASN1_BIT_STRING_free) + get_bit = backend._lib.ASN1_BIT_STRING_get_bit + digital_signature = get_bit(bit_string, 0) == 1 + content_commitment = get_bit(bit_string, 1) == 1 + key_encipherment = get_bit(bit_string, 2) == 1 + data_encipherment = get_bit(bit_string, 3) == 1 + key_agreement = get_bit(bit_string, 4) == 1 + key_cert_sign = get_bit(bit_string, 5) == 1 + crl_sign = get_bit(bit_string, 6) == 1 + encipher_only = get_bit(bit_string, 7) == 1 + decipher_only = get_bit(bit_string, 8) == 1 + return x509.KeyUsage( + digital_signature, + content_commitment, + key_encipherment, + data_encipherment, + key_agreement, + key_cert_sign, + crl_sign, + encipher_only, + decipher_only + ) + + +def _decode_general_names_extension(backend, gns): + gns = backend._ffi.cast("GENERAL_NAMES *", gns) + gns = backend._ffi.gc(gns, backend._lib.GENERAL_NAMES_free) + general_names = _decode_general_names(backend, gns) + return general_names + + +def _decode_subject_alt_name(backend, ext): + return x509.SubjectAlternativeName( + _decode_general_names_extension(backend, ext) + ) + + +def _decode_issuer_alt_name(backend, ext): + return x509.IssuerAlternativeName( + _decode_general_names_extension(backend, ext) + ) + + +def _decode_name_constraints(backend, nc): + nc = backend._ffi.cast("NAME_CONSTRAINTS *", nc) + nc = backend._ffi.gc(nc, backend._lib.NAME_CONSTRAINTS_free) + permitted = _decode_general_subtrees(backend, nc.permittedSubtrees) + excluded = _decode_general_subtrees(backend, 
nc.excludedSubtrees) + return x509.NameConstraints( + permitted_subtrees=permitted, excluded_subtrees=excluded + ) + + +def _decode_general_subtrees(backend, stack_subtrees): + if stack_subtrees == backend._ffi.NULL: + return None + + num = backend._lib.sk_GENERAL_SUBTREE_num(stack_subtrees) + subtrees = [] + + for i in range(num): + obj = backend._lib.sk_GENERAL_SUBTREE_value(stack_subtrees, i) + backend.openssl_assert(obj != backend._ffi.NULL) + name = _decode_general_name(backend, obj.base) + subtrees.append(name) + + return subtrees + + +def _decode_policy_constraints(backend, pc): + pc = backend._ffi.cast("POLICY_CONSTRAINTS *", pc) + pc = backend._ffi.gc(pc, backend._lib.POLICY_CONSTRAINTS_free) + + require_explicit_policy = _asn1_integer_to_int_or_none( + backend, pc.requireExplicitPolicy + ) + inhibit_policy_mapping = _asn1_integer_to_int_or_none( + backend, pc.inhibitPolicyMapping + ) + + return x509.PolicyConstraints( + require_explicit_policy, inhibit_policy_mapping + ) + + +def _decode_extended_key_usage(backend, sk): + sk = backend._ffi.cast("Cryptography_STACK_OF_ASN1_OBJECT *", sk) + sk = backend._ffi.gc(sk, backend._lib.sk_ASN1_OBJECT_free) + num = backend._lib.sk_ASN1_OBJECT_num(sk) + ekus = [] + + for i in range(num): + obj = backend._lib.sk_ASN1_OBJECT_value(sk, i) + backend.openssl_assert(obj != backend._ffi.NULL) + oid = x509.ObjectIdentifier(_obj2txt(backend, obj)) + ekus.append(oid) + + return x509.ExtendedKeyUsage(ekus) + + +_DISTPOINT_TYPE_FULLNAME = 0 +_DISTPOINT_TYPE_RELATIVENAME = 1 + + +def _decode_dist_points(backend, cdps): + cdps = backend._ffi.cast("Cryptography_STACK_OF_DIST_POINT *", cdps) + cdps = backend._ffi.gc(cdps, backend._lib.CRL_DIST_POINTS_free) + + num = backend._lib.sk_DIST_POINT_num(cdps) + dist_points = [] + for i in range(num): + full_name = None + relative_name = None + crl_issuer = None + reasons = None + cdp = backend._lib.sk_DIST_POINT_value(cdps, i) + if cdp.reasons != backend._ffi.NULL: + # We will check each bit from RFC 5280 + # ReasonFlags ::= BIT STRING { + # unused (0), + # keyCompromise (1), + # cACompromise (2), + # affiliationChanged (3), + # superseded (4), + # cessationOfOperation (5), + # certificateHold (6), + # privilegeWithdrawn (7), + # aACompromise (8) } + reasons = [] + get_bit = backend._lib.ASN1_BIT_STRING_get_bit + if get_bit(cdp.reasons, 1): + reasons.append(x509.ReasonFlags.key_compromise) + + if get_bit(cdp.reasons, 2): + reasons.append(x509.ReasonFlags.ca_compromise) + + if get_bit(cdp.reasons, 3): + reasons.append(x509.ReasonFlags.affiliation_changed) + + if get_bit(cdp.reasons, 4): + reasons.append(x509.ReasonFlags.superseded) + + if get_bit(cdp.reasons, 5): + reasons.append(x509.ReasonFlags.cessation_of_operation) + + if get_bit(cdp.reasons, 6): + reasons.append(x509.ReasonFlags.certificate_hold) + + if get_bit(cdp.reasons, 7): + reasons.append(x509.ReasonFlags.privilege_withdrawn) + + if get_bit(cdp.reasons, 8): + reasons.append(x509.ReasonFlags.aa_compromise) + + reasons = frozenset(reasons) + + if cdp.CRLissuer != backend._ffi.NULL: + crl_issuer = _decode_general_names(backend, cdp.CRLissuer) + + # Certificates may have a crl_issuer/reasons and no distribution + # point so make sure it's not null. + if cdp.distpoint != backend._ffi.NULL: + # Type 0 is fullName, there is no #define for it in the code. 
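+            # _DISTPOINT_TYPE_FULLNAME (0) and _DISTPOINT_TYPE_RELATIVENAME
+            # (1) above mirror the two arms of the DistributionPointName
+            # CHOICE quoted below.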
+ if cdp.distpoint.type == _DISTPOINT_TYPE_FULLNAME: + full_name = _decode_general_names( + backend, cdp.distpoint.name.fullname + ) + # OpenSSL code doesn't test for a specific type for + # relativename, everything that isn't fullname is considered + # relativename. Per RFC 5280: + # + # DistributionPointName ::= CHOICE { + # fullName [0] GeneralNames, + # nameRelativeToCRLIssuer [1] RelativeDistinguishedName } + else: + rns = cdp.distpoint.name.relativename + rnum = backend._lib.sk_X509_NAME_ENTRY_num(rns) + attributes = set() + for i in range(rnum): + rn = backend._lib.sk_X509_NAME_ENTRY_value( + rns, i + ) + backend.openssl_assert(rn != backend._ffi.NULL) + attributes.add( + _decode_x509_name_entry(backend, rn) + ) + + relative_name = x509.RelativeDistinguishedName(attributes) + + dist_points.append( + x509.DistributionPoint( + full_name, relative_name, reasons, crl_issuer + ) + ) + + return dist_points + + +def _decode_crl_distribution_points(backend, cdps): + dist_points = _decode_dist_points(backend, cdps) + return x509.CRLDistributionPoints(dist_points) + + +def _decode_freshest_crl(backend, cdps): + dist_points = _decode_dist_points(backend, cdps) + return x509.FreshestCRL(dist_points) + + +def _decode_inhibit_any_policy(backend, asn1_int): + asn1_int = backend._ffi.cast("ASN1_INTEGER *", asn1_int) + asn1_int = backend._ffi.gc(asn1_int, backend._lib.ASN1_INTEGER_free) + skip_certs = _asn1_integer_to_int(backend, asn1_int) + return x509.InhibitAnyPolicy(skip_certs) + + +def _decode_precert_signed_certificate_timestamps(backend, asn1_scts): + from cryptography.hazmat.backends.openssl.x509 import ( + _SignedCertificateTimestamp + ) + asn1_scts = backend._ffi.cast("Cryptography_STACK_OF_SCT *", asn1_scts) + asn1_scts = backend._ffi.gc(asn1_scts, backend._lib.SCT_LIST_free) + + scts = [] + for i in range(backend._lib.sk_SCT_num(asn1_scts)): + sct = backend._lib.sk_SCT_value(asn1_scts, i) + + scts.append(_SignedCertificateTimestamp(backend, asn1_scts, sct)) + return x509.PrecertificateSignedCertificateTimestamps(scts) + + +# CRLReason ::= ENUMERATED { +# unspecified (0), +# keyCompromise (1), +# cACompromise (2), +# affiliationChanged (3), +# superseded (4), +# cessationOfOperation (5), +# certificateHold (6), +# -- value 7 is not used +# removeFromCRL (8), +# privilegeWithdrawn (9), +# aACompromise (10) } +_CRL_ENTRY_REASON_CODE_TO_ENUM = { + 0: x509.ReasonFlags.unspecified, + 1: x509.ReasonFlags.key_compromise, + 2: x509.ReasonFlags.ca_compromise, + 3: x509.ReasonFlags.affiliation_changed, + 4: x509.ReasonFlags.superseded, + 5: x509.ReasonFlags.cessation_of_operation, + 6: x509.ReasonFlags.certificate_hold, + 8: x509.ReasonFlags.remove_from_crl, + 9: x509.ReasonFlags.privilege_withdrawn, + 10: x509.ReasonFlags.aa_compromise, +} + + +_CRL_ENTRY_REASON_ENUM_TO_CODE = { + x509.ReasonFlags.unspecified: 0, + x509.ReasonFlags.key_compromise: 1, + x509.ReasonFlags.ca_compromise: 2, + x509.ReasonFlags.affiliation_changed: 3, + x509.ReasonFlags.superseded: 4, + x509.ReasonFlags.cessation_of_operation: 5, + x509.ReasonFlags.certificate_hold: 6, + x509.ReasonFlags.remove_from_crl: 8, + x509.ReasonFlags.privilege_withdrawn: 9, + x509.ReasonFlags.aa_compromise: 10 +} + + +def _decode_crl_reason(backend, enum): + enum = backend._ffi.cast("ASN1_ENUMERATED *", enum) + enum = backend._ffi.gc(enum, backend._lib.ASN1_ENUMERATED_free) + code = backend._lib.ASN1_ENUMERATED_get(enum) + + try: + return x509.CRLReason(_CRL_ENTRY_REASON_CODE_TO_ENUM[code]) + except KeyError: + raise ValueError("Unsupported 
reason code: {0}".format(code)) + + +def _decode_invalidity_date(backend, inv_date): + generalized_time = backend._ffi.cast( + "ASN1_GENERALIZEDTIME *", inv_date + ) + generalized_time = backend._ffi.gc( + generalized_time, backend._lib.ASN1_GENERALIZEDTIME_free + ) + return x509.InvalidityDate( + _parse_asn1_generalized_time(backend, generalized_time) + ) + + +def _decode_cert_issuer(backend, gns): + gns = backend._ffi.cast("GENERAL_NAMES *", gns) + gns = backend._ffi.gc(gns, backend._lib.GENERAL_NAMES_free) + general_names = _decode_general_names(backend, gns) + return x509.CertificateIssuer(general_names) + + +def _asn1_to_der(backend, asn1_type): + buf = backend._ffi.new("unsigned char **") + res = backend._lib.i2d_ASN1_TYPE(asn1_type, buf) + backend.openssl_assert(res >= 0) + backend.openssl_assert(buf[0] != backend._ffi.NULL) + buf = backend._ffi.gc( + buf, lambda buffer: backend._lib.OPENSSL_free(buffer[0]) + ) + return backend._ffi.buffer(buf[0], res)[:] + + +def _asn1_integer_to_int(backend, asn1_int): + bn = backend._lib.ASN1_INTEGER_to_BN(asn1_int, backend._ffi.NULL) + backend.openssl_assert(bn != backend._ffi.NULL) + bn = backend._ffi.gc(bn, backend._lib.BN_free) + return backend._bn_to_int(bn) + + +def _asn1_integer_to_int_or_none(backend, asn1_int): + if asn1_int == backend._ffi.NULL: + return None + else: + return _asn1_integer_to_int(backend, asn1_int) + + +def _asn1_string_to_bytes(backend, asn1_string): + return backend._ffi.buffer(asn1_string.data, asn1_string.length)[:] + + +def _asn1_string_to_ascii(backend, asn1_string): + return _asn1_string_to_bytes(backend, asn1_string).decode("ascii") + + +def _asn1_string_to_utf8(backend, asn1_string): + buf = backend._ffi.new("unsigned char **") + res = backend._lib.ASN1_STRING_to_UTF8(buf, asn1_string) + if res == -1: + raise ValueError( + "Unsupported ASN1 string type. 
Type: {0}".format(asn1_string.type) + ) + + backend.openssl_assert(buf[0] != backend._ffi.NULL) + buf = backend._ffi.gc( + buf, lambda buffer: backend._lib.OPENSSL_free(buffer[0]) + ) + return backend._ffi.buffer(buf[0], res)[:].decode('utf8') + + +def _parse_asn1_time(backend, asn1_time): + backend.openssl_assert(asn1_time != backend._ffi.NULL) + generalized_time = backend._lib.ASN1_TIME_to_generalizedtime( + asn1_time, backend._ffi.NULL + ) + if generalized_time == backend._ffi.NULL: + raise ValueError( + "Couldn't parse ASN.1 time as generalizedtime {!r}".format( + _asn1_string_to_bytes(backend, asn1_time) + ) + ) + + generalized_time = backend._ffi.gc( + generalized_time, backend._lib.ASN1_GENERALIZEDTIME_free + ) + return _parse_asn1_generalized_time(backend, generalized_time) + + +def _parse_asn1_generalized_time(backend, generalized_time): + time = _asn1_string_to_ascii( + backend, backend._ffi.cast("ASN1_STRING *", generalized_time) + ) + return datetime.datetime.strptime(time, "%Y%m%d%H%M%SZ") + + +_EXTENSION_HANDLERS_NO_SCT = { + ExtensionOID.BASIC_CONSTRAINTS: _decode_basic_constraints, + ExtensionOID.SUBJECT_KEY_IDENTIFIER: _decode_subject_key_identifier, + ExtensionOID.KEY_USAGE: _decode_key_usage, + ExtensionOID.SUBJECT_ALTERNATIVE_NAME: _decode_subject_alt_name, + ExtensionOID.EXTENDED_KEY_USAGE: _decode_extended_key_usage, + ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _decode_authority_key_identifier, + ExtensionOID.AUTHORITY_INFORMATION_ACCESS: ( + _decode_authority_information_access + ), + ExtensionOID.CERTIFICATE_POLICIES: _decode_certificate_policies, + ExtensionOID.CRL_DISTRIBUTION_POINTS: _decode_crl_distribution_points, + ExtensionOID.FRESHEST_CRL: _decode_freshest_crl, + ExtensionOID.OCSP_NO_CHECK: _decode_ocsp_no_check, + ExtensionOID.INHIBIT_ANY_POLICY: _decode_inhibit_any_policy, + ExtensionOID.ISSUER_ALTERNATIVE_NAME: _decode_issuer_alt_name, + ExtensionOID.NAME_CONSTRAINTS: _decode_name_constraints, + ExtensionOID.POLICY_CONSTRAINTS: _decode_policy_constraints, +} +_EXTENSION_HANDLERS = _EXTENSION_HANDLERS_NO_SCT.copy() +_EXTENSION_HANDLERS[ + ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS +] = _decode_precert_signed_certificate_timestamps + + +_REVOKED_EXTENSION_HANDLERS = { + CRLEntryExtensionOID.CRL_REASON: _decode_crl_reason, + CRLEntryExtensionOID.INVALIDITY_DATE: _decode_invalidity_date, + CRLEntryExtensionOID.CERTIFICATE_ISSUER: _decode_cert_issuer, +} + +_CRL_EXTENSION_HANDLERS = { + ExtensionOID.CRL_NUMBER: _decode_crl_number, + ExtensionOID.DELTA_CRL_INDICATOR: _decode_delta_crl_indicator, + ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _decode_authority_key_identifier, + ExtensionOID.ISSUER_ALTERNATIVE_NAME: _decode_issuer_alt_name, + ExtensionOID.AUTHORITY_INFORMATION_ACCESS: ( + _decode_authority_information_access + ), +} + +_CERTIFICATE_EXTENSION_PARSER_NO_SCT = _X509ExtensionParser( + ext_count=lambda backend, x: backend._lib.X509_get_ext_count(x), + get_ext=lambda backend, x, i: backend._lib.X509_get_ext(x, i), + handlers=_EXTENSION_HANDLERS_NO_SCT +) + +_CERTIFICATE_EXTENSION_PARSER = _X509ExtensionParser( + ext_count=lambda backend, x: backend._lib.X509_get_ext_count(x), + get_ext=lambda backend, x, i: backend._lib.X509_get_ext(x, i), + handlers=_EXTENSION_HANDLERS +) + +_CSR_EXTENSION_PARSER = _X509ExtensionParser( + ext_count=lambda backend, x: backend._lib.sk_X509_EXTENSION_num(x), + get_ext=lambda backend, x, i: backend._lib.sk_X509_EXTENSION_value(x, i), + handlers=_EXTENSION_HANDLERS +) + +_REVOKED_CERTIFICATE_EXTENSION_PARSER = 
_X509ExtensionParser( + ext_count=lambda backend, x: backend._lib.X509_REVOKED_get_ext_count(x), + get_ext=lambda backend, x, i: backend._lib.X509_REVOKED_get_ext(x, i), + handlers=_REVOKED_EXTENSION_HANDLERS, +) + +_CRL_EXTENSION_PARSER = _X509ExtensionParser( + ext_count=lambda backend, x: backend._lib.X509_CRL_get_ext_count(x), + get_ext=lambda backend, x, i: backend._lib.X509_CRL_get_ext(x, i), + handlers=_CRL_EXTENSION_HANDLERS, +) diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/dh.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/dh.py new file mode 100644 index 0000000..095f062 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/dh.py @@ -0,0 +1,280 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import dh + + +def _dh_params_dup(dh_cdata, backend): + lib = backend._lib + ffi = backend._ffi + + param_cdata = lib.DHparams_dup(dh_cdata) + backend.openssl_assert(param_cdata != ffi.NULL) + param_cdata = ffi.gc(param_cdata, lib.DH_free) + if lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102: + # In OpenSSL versions < 1.0.2 and in LibreSSL, DHparams_dup doesn't copy q + q = ffi.new("BIGNUM **") + lib.DH_get0_pqg(dh_cdata, ffi.NULL, q, ffi.NULL) + q_dup = lib.BN_dup(q[0]) + res = lib.DH_set0_pqg(param_cdata, ffi.NULL, q_dup, ffi.NULL) + backend.openssl_assert(res == 1) + + return param_cdata + + +def _dh_cdata_to_parameters(dh_cdata, backend): + param_cdata = _dh_params_dup(dh_cdata, backend) + return _DHParameters(backend, param_cdata) + + +@utils.register_interface(dh.DHParametersWithSerialization) +class _DHParameters(object): + def __init__(self, backend, dh_cdata): + self._backend = backend + self._dh_cdata = dh_cdata + + def parameter_numbers(self): + p = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + if q[0] == self._backend._ffi.NULL: + q_val = None + else: + q_val = self._backend._bn_to_int(q[0]) + return dh.DHParameterNumbers( + p=self._backend._bn_to_int(p[0]), + g=self._backend._bn_to_int(g[0]), + q=q_val + ) + + def generate_private_key(self): + return self._backend.generate_dh_private_key(self) + + def parameter_bytes(self, encoding, format): + if format is not serialization.ParameterFormat.PKCS3: + raise ValueError( + "Only PKCS3 serialization is supported" + ) + if not self._backend._lib.Cryptography_HAS_EVP_PKEY_DHX: + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, + self._backend._ffi.NULL, + q, + self._backend._ffi.NULL) + if q[0] != self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "DH X9.42 serialization is not supported", + _Reasons.UNSUPPORTED_SERIALIZATION) + + return self._backend._parameter_bytes( + encoding, + format, + self._dh_cdata + ) + + +def _handle_dh_compute_key_error(errors, backend): + lib = backend._lib + + backend.openssl_assert( 
errors[0]._lib_reason_match( + lib.ERR_LIB_DH, lib.DH_R_INVALID_PUBKEY + ) + ) + + raise ValueError("Public key value is invalid for this exchange.") + + +def _get_dh_num_bits(backend, dh_cdata): + p = backend._ffi.new("BIGNUM **") + backend._lib.DH_get0_pqg(dh_cdata, p, + backend._ffi.NULL, + backend._ffi.NULL) + backend.openssl_assert(p[0] != backend._ffi.NULL) + return backend._lib.BN_num_bits(p[0]) + + +@utils.register_interface(dh.DHPrivateKeyWithSerialization) +class _DHPrivateKey(object): + def __init__(self, backend, dh_cdata, evp_pkey): + self._backend = backend + self._dh_cdata = dh_cdata + self._evp_pkey = evp_pkey + self._key_size_bytes = self._backend._lib.DH_size(dh_cdata) + + @property + def key_size(self): + return _get_dh_num_bits(self._backend, self._dh_cdata) + + def private_numbers(self): + p = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + if q[0] == self._backend._ffi.NULL: + q_val = None + else: + q_val = self._backend._bn_to_int(q[0]) + pub_key = self._backend._ffi.new("BIGNUM **") + priv_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_key(self._dh_cdata, pub_key, priv_key) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(priv_key[0] != self._backend._ffi.NULL) + return dh.DHPrivateNumbers( + public_numbers=dh.DHPublicNumbers( + parameter_numbers=dh.DHParameterNumbers( + p=self._backend._bn_to_int(p[0]), + g=self._backend._bn_to_int(g[0]), + q=q_val + ), + y=self._backend._bn_to_int(pub_key[0]) + ), + x=self._backend._bn_to_int(priv_key[0]) + ) + + def exchange(self, peer_public_key): + + buf = self._backend._ffi.new("unsigned char[]", self._key_size_bytes) + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_key(peer_public_key._dh_cdata, pub_key, + self._backend._ffi.NULL) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + res = self._backend._lib.DH_compute_key( + buf, + pub_key[0], + self._dh_cdata + ) + + if res == -1: + errors = self._backend._consume_errors() + return _handle_dh_compute_key_error(errors, self._backend) + else: + self._backend.openssl_assert(res >= 1) + + key = self._backend._ffi.buffer(buf)[:res] + pad = self._key_size_bytes - len(key) + + if pad > 0: + key = (b"\x00" * pad) + key + + return key + + def public_key(self): + dh_cdata = _dh_params_dup(self._dh_cdata, self._backend) + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_key(self._dh_cdata, + pub_key, self._backend._ffi.NULL) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + pub_key_dup = self._backend._lib.BN_dup(pub_key[0]) + self._backend.openssl_assert(pub_key_dup != self._backend._ffi.NULL) + + res = self._backend._lib.DH_set0_key(dh_cdata, + pub_key_dup, + self._backend._ffi.NULL) + self._backend.openssl_assert(res == 1) + evp_pkey = self._backend._dh_cdata_to_evp_pkey(dh_cdata) + return _DHPublicKey(self._backend, dh_cdata, evp_pkey) + + def parameters(self): + return _dh_cdata_to_parameters(self._dh_cdata, self._backend) + + def private_bytes(self, encoding, format, encryption_algorithm): + if format is not serialization.PrivateFormat.PKCS8: + raise ValueError( + "DH private keys support only PKCS8 serialization" + ) + if not 
self._backend._lib.Cryptography_HAS_EVP_PKEY_DHX: + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, + self._backend._ffi.NULL, + q, + self._backend._ffi.NULL) + if q[0] != self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "DH X9.42 serialization is not supported", + _Reasons.UNSUPPORTED_SERIALIZATION) + + return self._backend._private_key_bytes( + encoding, + format, + encryption_algorithm, + self._evp_pkey, + self._dh_cdata + ) + + +@utils.register_interface(dh.DHPublicKeyWithSerialization) +class _DHPublicKey(object): + def __init__(self, backend, dh_cdata, evp_pkey): + self._backend = backend + self._dh_cdata = dh_cdata + self._evp_pkey = evp_pkey + self._key_size_bits = _get_dh_num_bits(self._backend, self._dh_cdata) + + @property + def key_size(self): + return self._key_size_bits + + def public_numbers(self): + p = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + if q[0] == self._backend._ffi.NULL: + q_val = None + else: + q_val = self._backend._bn_to_int(q[0]) + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_key(self._dh_cdata, + pub_key, self._backend._ffi.NULL) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + return dh.DHPublicNumbers( + parameter_numbers=dh.DHParameterNumbers( + p=self._backend._bn_to_int(p[0]), + g=self._backend._bn_to_int(g[0]), + q=q_val + ), + y=self._backend._bn_to_int(pub_key[0]) + ) + + def parameters(self): + return _dh_cdata_to_parameters(self._dh_cdata, self._backend) + + def public_bytes(self, encoding, format): + if format is not serialization.PublicFormat.SubjectPublicKeyInfo: + raise ValueError( + "DH public keys support only " + "SubjectPublicKeyInfo serialization" + ) + + if not self._backend._lib.Cryptography_HAS_EVP_PKEY_DHX: + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, + self._backend._ffi.NULL, + q, + self._backend._ffi.NULL) + if q[0] != self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "DH X9.42 serialization is not supported", + _Reasons.UNSUPPORTED_SERIALIZATION) + + return self._backend._public_key_bytes( + encoding, + format, + self, + self._evp_pkey, + None + ) diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/dsa.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/dsa.py new file mode 100644 index 0000000..48886e4 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/dsa.py @@ -0,0 +1,269 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
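+# Usage sketch for the DH backend added just above (illustrative only; this
+# assumes the standard public `dh` API, nothing specific to this vendored
+# copy). Note that _DHPrivateKey.exchange left-pads the shared secret with
+# zero bytes up to the byte length of p:
+#
+#     from cryptography.hazmat.backends import default_backend
+#     from cryptography.hazmat.primitives.asymmetric import dh
+#
+#     parameters = dh.generate_parameters(generator=2, key_size=2048,
+#                                         backend=default_backend())
+#     server_key = parameters.generate_private_key()
+#     client_key = parameters.generate_private_key()
+#     shared = server_key.exchange(client_key.public_key())
+#     assert shared == client_key.exchange(server_key.public_key())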
+ +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.backends.openssl.utils import ( + _calculate_digest_and_algorithm, _check_not_prehashed, + _warn_sign_verify_deprecated +) +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import ( + AsymmetricSignatureContext, AsymmetricVerificationContext, dsa +) + + +def _dsa_sig_sign(backend, private_key, data): + sig_buf_len = backend._lib.DSA_size(private_key._dsa_cdata) + sig_buf = backend._ffi.new("unsigned char[]", sig_buf_len) + buflen = backend._ffi.new("unsigned int *") + + # The first parameter passed to DSA_sign is unused by OpenSSL but + # must be an integer. + res = backend._lib.DSA_sign( + 0, data, len(data), sig_buf, buflen, private_key._dsa_cdata + ) + backend.openssl_assert(res == 1) + backend.openssl_assert(buflen[0]) + + return backend._ffi.buffer(sig_buf)[:buflen[0]] + + +def _dsa_sig_verify(backend, public_key, signature, data): + # The first parameter passed to DSA_verify is unused by OpenSSL but + # must be an integer. + res = backend._lib.DSA_verify( + 0, data, len(data), signature, len(signature), public_key._dsa_cdata + ) + + if res != 1: + backend._consume_errors() + raise InvalidSignature + + +@utils.register_interface(AsymmetricVerificationContext) +class _DSAVerificationContext(object): + def __init__(self, backend, public_key, signature, algorithm): + self._backend = backend + self._public_key = public_key + self._signature = signature + self._algorithm = algorithm + + self._hash_ctx = hashes.Hash(self._algorithm, self._backend) + + def update(self, data): + self._hash_ctx.update(data) + + def verify(self): + data_to_verify = self._hash_ctx.finalize() + + _dsa_sig_verify( + self._backend, self._public_key, self._signature, data_to_verify + ) + + +@utils.register_interface(AsymmetricSignatureContext) +class _DSASignatureContext(object): + def __init__(self, backend, private_key, algorithm): + self._backend = backend + self._private_key = private_key + self._algorithm = algorithm + self._hash_ctx = hashes.Hash(self._algorithm, self._backend) + + def update(self, data): + self._hash_ctx.update(data) + + def finalize(self): + data_to_sign = self._hash_ctx.finalize() + return _dsa_sig_sign(self._backend, self._private_key, data_to_sign) + + +@utils.register_interface(dsa.DSAParametersWithNumbers) +class _DSAParameters(object): + def __init__(self, backend, dsa_cdata): + self._backend = backend + self._dsa_cdata = dsa_cdata + + def parameter_numbers(self): + p = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg(self._dsa_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(q[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + return dsa.DSAParameterNumbers( + p=self._backend._bn_to_int(p[0]), + q=self._backend._bn_to_int(q[0]), + g=self._backend._bn_to_int(g[0]) + ) + + def generate_private_key(self): + return self._backend.generate_dsa_private_key(self) + + +@utils.register_interface(dsa.DSAPrivateKeyWithSerialization) +class _DSAPrivateKey(object): + def __init__(self, backend, dsa_cdata, evp_pkey): + self._backend = backend + self._dsa_cdata = dsa_cdata + self._evp_pkey = evp_pkey + + p = self._backend._ffi.new("BIGNUM **") + 
self._backend._lib.DSA_get0_pqg( + dsa_cdata, p, self._backend._ffi.NULL, self._backend._ffi.NULL + ) + self._backend.openssl_assert(p[0] != backend._ffi.NULL) + self._key_size = self._backend._lib.BN_num_bits(p[0]) + + key_size = utils.read_only_property("_key_size") + + def signer(self, signature_algorithm): + _warn_sign_verify_deprecated() + _check_not_prehashed(signature_algorithm) + return _DSASignatureContext(self._backend, self, signature_algorithm) + + def private_numbers(self): + p = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + pub_key = self._backend._ffi.new("BIGNUM **") + priv_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg(self._dsa_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(q[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + self._backend._lib.DSA_get0_key(self._dsa_cdata, pub_key, priv_key) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(priv_key[0] != self._backend._ffi.NULL) + return dsa.DSAPrivateNumbers( + public_numbers=dsa.DSAPublicNumbers( + parameter_numbers=dsa.DSAParameterNumbers( + p=self._backend._bn_to_int(p[0]), + q=self._backend._bn_to_int(q[0]), + g=self._backend._bn_to_int(g[0]) + ), + y=self._backend._bn_to_int(pub_key[0]) + ), + x=self._backend._bn_to_int(priv_key[0]) + ) + + def public_key(self): + dsa_cdata = self._backend._lib.DSAparams_dup(self._dsa_cdata) + self._backend.openssl_assert(dsa_cdata != self._backend._ffi.NULL) + dsa_cdata = self._backend._ffi.gc( + dsa_cdata, self._backend._lib.DSA_free + ) + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_key( + self._dsa_cdata, pub_key, self._backend._ffi.NULL + ) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + pub_key_dup = self._backend._lib.BN_dup(pub_key[0]) + res = self._backend._lib.DSA_set0_key( + dsa_cdata, pub_key_dup, self._backend._ffi.NULL + ) + self._backend.openssl_assert(res == 1) + evp_pkey = self._backend._dsa_cdata_to_evp_pkey(dsa_cdata) + return _DSAPublicKey(self._backend, dsa_cdata, evp_pkey) + + def parameters(self): + dsa_cdata = self._backend._lib.DSAparams_dup(self._dsa_cdata) + self._backend.openssl_assert(dsa_cdata != self._backend._ffi.NULL) + dsa_cdata = self._backend._ffi.gc( + dsa_cdata, self._backend._lib.DSA_free + ) + return _DSAParameters(self._backend, dsa_cdata) + + def private_bytes(self, encoding, format, encryption_algorithm): + return self._backend._private_key_bytes( + encoding, + format, + encryption_algorithm, + self._evp_pkey, + self._dsa_cdata + ) + + def sign(self, data, algorithm): + data, algorithm = _calculate_digest_and_algorithm( + self._backend, data, algorithm + ) + return _dsa_sig_sign(self._backend, self, data) + + +@utils.register_interface(dsa.DSAPublicKeyWithSerialization) +class _DSAPublicKey(object): + def __init__(self, backend, dsa_cdata, evp_pkey): + self._backend = backend + self._dsa_cdata = dsa_cdata + self._evp_pkey = evp_pkey + p = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg( + dsa_cdata, p, self._backend._ffi.NULL, self._backend._ffi.NULL + ) + self._backend.openssl_assert(p[0] != backend._ffi.NULL) + self._key_size = self._backend._lib.BN_num_bits(p[0]) + + key_size = utils.read_only_property("_key_size") + + def verifier(self, signature, signature_algorithm): + 
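+ # Legacy streaming-verification entry point: signer()/verifier() are
+ # deprecated upstream in favor of the one-shot sign()/verify() methods,
+ # which is why the call below emits a deprecation warning.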
_warn_sign_verify_deprecated() + if not isinstance(signature, bytes): + raise TypeError("signature must be bytes.") + + _check_not_prehashed(signature_algorithm) + return _DSAVerificationContext( + self._backend, self, signature, signature_algorithm + ) + + def public_numbers(self): + p = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg(self._dsa_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(q[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + self._backend._lib.DSA_get0_key( + self._dsa_cdata, pub_key, self._backend._ffi.NULL + ) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + return dsa.DSAPublicNumbers( + parameter_numbers=dsa.DSAParameterNumbers( + p=self._backend._bn_to_int(p[0]), + q=self._backend._bn_to_int(q[0]), + g=self._backend._bn_to_int(g[0]) + ), + y=self._backend._bn_to_int(pub_key[0]) + ) + + def parameters(self): + dsa_cdata = self._backend._lib.DSAparams_dup(self._dsa_cdata) + dsa_cdata = self._backend._ffi.gc( + dsa_cdata, self._backend._lib.DSA_free + ) + return _DSAParameters(self._backend, dsa_cdata) + + def public_bytes(self, encoding, format): + if format is serialization.PublicFormat.PKCS1: + raise ValueError( + "DSA public keys do not support PKCS1 serialization" + ) + + return self._backend._public_key_bytes( + encoding, + format, + self, + self._evp_pkey, + None + ) + + def verify(self, signature, data, algorithm): + data, algorithm = _calculate_digest_and_algorithm( + self._backend, data, algorithm + ) + return _dsa_sig_verify(self._backend, self, signature, data) diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/ec.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/ec.py new file mode 100644 index 0000000..69da234 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/ec.py @@ -0,0 +1,298 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.exceptions import ( + InvalidSignature, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.openssl.utils import ( + _calculate_digest_and_algorithm, _check_not_prehashed, + _warn_sign_verify_deprecated +) +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import ( + AsymmetricSignatureContext, AsymmetricVerificationContext, ec +) + + +def _check_signature_algorithm(signature_algorithm): + if not isinstance(signature_algorithm, ec.ECDSA): + raise UnsupportedAlgorithm( + "Unsupported elliptic curve signature algorithm.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM) + + +def _ec_key_curve_sn(backend, ec_key): + group = backend._lib.EC_KEY_get0_group(ec_key) + backend.openssl_assert(group != backend._ffi.NULL) + + nid = backend._lib.EC_GROUP_get_curve_name(group) + # The following check is to find EC keys with unnamed curves and raise + # an error for now. 
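+ # NID_undef means the key encodes explicit curve parameters instead of
+ # a named-curve OID, which this backend does not support.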
+ if nid == backend._lib.NID_undef: + raise NotImplementedError( + "ECDSA certificates with unnamed curves are unsupported " + "at this time" + ) + + curve_name = backend._lib.OBJ_nid2sn(nid) + backend.openssl_assert(curve_name != backend._ffi.NULL) + + sn = backend._ffi.string(curve_name).decode('ascii') + return sn + + +def _mark_asn1_named_ec_curve(backend, ec_cdata): + """ + Set the named curve flag on the EC_KEY. This causes OpenSSL to + serialize EC keys along with their curve OID which makes + deserialization easier. + """ + + backend._lib.EC_KEY_set_asn1_flag( + ec_cdata, backend._lib.OPENSSL_EC_NAMED_CURVE + ) + + +def _sn_to_elliptic_curve(backend, sn): + try: + return ec._CURVE_TYPES[sn]() + except KeyError: + raise UnsupportedAlgorithm( + "{0} is not a supported elliptic curve".format(sn), + _Reasons.UNSUPPORTED_ELLIPTIC_CURVE + ) + + +def _ecdsa_sig_sign(backend, private_key, data): + max_size = backend._lib.ECDSA_size(private_key._ec_key) + backend.openssl_assert(max_size > 0) + + sigbuf = backend._ffi.new("unsigned char[]", max_size) + siglen_ptr = backend._ffi.new("unsigned int[]", 1) + res = backend._lib.ECDSA_sign( + 0, data, len(data), sigbuf, siglen_ptr, private_key._ec_key + ) + backend.openssl_assert(res == 1) + return backend._ffi.buffer(sigbuf)[:siglen_ptr[0]] + + +def _ecdsa_sig_verify(backend, public_key, signature, data): + res = backend._lib.ECDSA_verify( + 0, data, len(data), signature, len(signature), public_key._ec_key + ) + if res != 1: + backend._consume_errors() + raise InvalidSignature + + +@utils.register_interface(AsymmetricSignatureContext) +class _ECDSASignatureContext(object): + def __init__(self, backend, private_key, algorithm): + self._backend = backend + self._private_key = private_key + self._digest = hashes.Hash(algorithm, backend) + + def update(self, data): + self._digest.update(data) + + def finalize(self): + digest = self._digest.finalize() + + return _ecdsa_sig_sign(self._backend, self._private_key, digest) + + +@utils.register_interface(AsymmetricVerificationContext) +class _ECDSAVerificationContext(object): + def __init__(self, backend, public_key, signature, algorithm): + self._backend = backend + self._public_key = public_key + self._signature = signature + self._digest = hashes.Hash(algorithm, backend) + + def update(self, data): + self._digest.update(data) + + def verify(self): + digest = self._digest.finalize() + _ecdsa_sig_verify( + self._backend, self._public_key, self._signature, digest + ) + + +@utils.register_interface(ec.EllipticCurvePrivateKeyWithSerialization) +class _EllipticCurvePrivateKey(object): + def __init__(self, backend, ec_key_cdata, evp_pkey): + self._backend = backend + _mark_asn1_named_ec_curve(backend, ec_key_cdata) + self._ec_key = ec_key_cdata + self._evp_pkey = evp_pkey + + sn = _ec_key_curve_sn(backend, ec_key_cdata) + self._curve = _sn_to_elliptic_curve(backend, sn) + + curve = utils.read_only_property("_curve") + + @property + def key_size(self): + return self.curve.key_size + + def signer(self, signature_algorithm): + _warn_sign_verify_deprecated() + _check_signature_algorithm(signature_algorithm) + _check_not_prehashed(signature_algorithm.algorithm) + return _ECDSASignatureContext( + self._backend, self, signature_algorithm.algorithm + ) + + def exchange(self, algorithm, peer_public_key): + if not ( + self._backend.elliptic_curve_exchange_algorithm_supported( + algorithm, self.curve + ) + ): + raise UnsupportedAlgorithm( + "This backend does not support the ECDH algorithm.", + 
_Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM + ) + + if peer_public_key.curve.name != self.curve.name: + raise ValueError( + "peer_public_key and self are not on the same curve" + ) + + group = self._backend._lib.EC_KEY_get0_group(self._ec_key) + z_len = (self._backend._lib.EC_GROUP_get_degree(group) + 7) // 8 + self._backend.openssl_assert(z_len > 0) + z_buf = self._backend._ffi.new("uint8_t[]", z_len) + peer_key = self._backend._lib.EC_KEY_get0_public_key( + peer_public_key._ec_key + ) + + r = self._backend._lib.ECDH_compute_key( + z_buf, z_len, peer_key, self._ec_key, self._backend._ffi.NULL + ) + self._backend.openssl_assert(r > 0) + return self._backend._ffi.buffer(z_buf)[:z_len] + + def public_key(self): + group = self._backend._lib.EC_KEY_get0_group(self._ec_key) + self._backend.openssl_assert(group != self._backend._ffi.NULL) + + curve_nid = self._backend._lib.EC_GROUP_get_curve_name(group) + + public_ec_key = self._backend._lib.EC_KEY_new_by_curve_name(curve_nid) + self._backend.openssl_assert(public_ec_key != self._backend._ffi.NULL) + public_ec_key = self._backend._ffi.gc( + public_ec_key, self._backend._lib.EC_KEY_free + ) + + point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key) + self._backend.openssl_assert(point != self._backend._ffi.NULL) + + res = self._backend._lib.EC_KEY_set_public_key(public_ec_key, point) + self._backend.openssl_assert(res == 1) + + evp_pkey = self._backend._ec_cdata_to_evp_pkey(public_ec_key) + + return _EllipticCurvePublicKey(self._backend, public_ec_key, evp_pkey) + + def private_numbers(self): + bn = self._backend._lib.EC_KEY_get0_private_key(self._ec_key) + private_value = self._backend._bn_to_int(bn) + return ec.EllipticCurvePrivateNumbers( + private_value=private_value, + public_numbers=self.public_key().public_numbers() + ) + + def private_bytes(self, encoding, format, encryption_algorithm): + return self._backend._private_key_bytes( + encoding, + format, + encryption_algorithm, + self._evp_pkey, + self._ec_key + ) + + def sign(self, data, signature_algorithm): + _check_signature_algorithm(signature_algorithm) + data, algorithm = _calculate_digest_and_algorithm( + self._backend, data, signature_algorithm._algorithm + ) + return _ecdsa_sig_sign(self._backend, self, data) + + +@utils.register_interface(ec.EllipticCurvePublicKeyWithSerialization) +class _EllipticCurvePublicKey(object): + def __init__(self, backend, ec_key_cdata, evp_pkey): + self._backend = backend + _mark_asn1_named_ec_curve(backend, ec_key_cdata) + self._ec_key = ec_key_cdata + self._evp_pkey = evp_pkey + + sn = _ec_key_curve_sn(backend, ec_key_cdata) + self._curve = _sn_to_elliptic_curve(backend, sn) + + curve = utils.read_only_property("_curve") + + @property + def key_size(self): + return self.curve.key_size + + def verifier(self, signature, signature_algorithm): + _warn_sign_verify_deprecated() + if not isinstance(signature, bytes): + raise TypeError("signature must be bytes.") + + _check_signature_algorithm(signature_algorithm) + _check_not_prehashed(signature_algorithm.algorithm) + return _ECDSAVerificationContext( + self._backend, self, signature, signature_algorithm.algorithm + ) + + def public_numbers(self): + get_func, group = ( + self._backend._ec_key_determine_group_get_func(self._ec_key) + ) + point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key) + self._backend.openssl_assert(point != self._backend._ffi.NULL) + + with self._backend._tmp_bn_ctx() as bn_ctx: + bn_x = self._backend._lib.BN_CTX_get(bn_ctx) + bn_y = self._backend._lib.BN_CTX_get(bn_ctx) 
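+ # get_func was selected by _ec_key_determine_group_get_func above; it is
+ # the affine-coordinate getter matching the group's field type (assumed
+ # here to be EC_POINT_get_affine_coordinates_GFp or _GF2m).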
+ + res = get_func(group, point, bn_x, bn_y, bn_ctx) + self._backend.openssl_assert(res == 1) + + x = self._backend._bn_to_int(bn_x) + y = self._backend._bn_to_int(bn_y) + + return ec.EllipticCurvePublicNumbers( + x=x, + y=y, + curve=self._curve + ) + + def public_bytes(self, encoding, format): + if format is serialization.PublicFormat.PKCS1: + raise ValueError( + "EC public keys do not support PKCS1 serialization" + ) + + return self._backend._public_key_bytes( + encoding, + format, + self, + self._evp_pkey, + None + ) + + def verify(self, signature, data, signature_algorithm): + _check_signature_algorithm(signature_algorithm) + data, algorithm = _calculate_digest_and_algorithm( + self._backend, data, signature_algorithm._algorithm + ) + _ecdsa_sig_verify(self._backend, self, signature, data) diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/encode_asn1.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/encode_asn1.py new file mode 100644 index 0000000..a2c7ed7 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/encode_asn1.py @@ -0,0 +1,611 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import calendar +import ipaddress + +import six + +from cryptography import utils, x509 +from cryptography.hazmat.backends.openssl.decode_asn1 import ( + _CRL_ENTRY_REASON_ENUM_TO_CODE, _DISTPOINT_TYPE_FULLNAME, + _DISTPOINT_TYPE_RELATIVENAME +) +from cryptography.x509.name import _ASN1Type +from cryptography.x509.oid import CRLEntryExtensionOID, ExtensionOID + + +def _encode_asn1_int(backend, x): + """ + Converts a python integer to an ASN1_INTEGER. The returned ASN1_INTEGER + will not be garbage collected (to support adding them to structs that take + ownership of the object). Be sure to register it for GC if it will be + discarded after use. + + """ + # Convert Python integer to OpenSSL "bignum" in case value exceeds + # machine's native integer limits (note: `int_to_bn` doesn't automatically + # GC). + i = backend._int_to_bn(x) + i = backend._ffi.gc(i, backend._lib.BN_free) + + # Wrap in an ASN.1 integer. Don't GC -- as documented. + i = backend._lib.BN_to_ASN1_INTEGER(i, backend._ffi.NULL) + backend.openssl_assert(i != backend._ffi.NULL) + return i + + +def _encode_asn1_int_gc(backend, x): + i = _encode_asn1_int(backend, x) + i = backend._ffi.gc(i, backend._lib.ASN1_INTEGER_free) + return i + + +def _encode_asn1_str(backend, data, length): + """ + Create an ASN1_OCTET_STRING from a Python byte string. + """ + s = backend._lib.ASN1_OCTET_STRING_new() + res = backend._lib.ASN1_OCTET_STRING_set(s, data, length) + backend.openssl_assert(res == 1) + return s + + +def _encode_asn1_utf8_str(backend, string): + """ + Create an ASN1_UTF8STRING from a Python unicode string. + This object will be an ASN1_STRING with UTF8 type in OpenSSL and + can be decoded with ASN1_STRING_to_UTF8. 
+ """ + s = backend._lib.ASN1_UTF8STRING_new() + res = backend._lib.ASN1_STRING_set( + s, string.encode("utf8"), len(string.encode("utf8")) + ) + backend.openssl_assert(res == 1) + return s + + +def _encode_asn1_str_gc(backend, data, length): + s = _encode_asn1_str(backend, data, length) + s = backend._ffi.gc(s, backend._lib.ASN1_OCTET_STRING_free) + return s + + +def _encode_inhibit_any_policy(backend, inhibit_any_policy): + return _encode_asn1_int_gc(backend, inhibit_any_policy.skip_certs) + + +def _encode_name(backend, name): + """ + The X509_NAME created will not be gc'd. Use _encode_name_gc if needed. + """ + subject = backend._lib.X509_NAME_new() + for rdn in name.rdns: + set_flag = 0 # indicate whether to add to last RDN or create new RDN + for attribute in rdn: + name_entry = _encode_name_entry(backend, attribute) + # X509_NAME_add_entry dups the object so we need to gc this copy + name_entry = backend._ffi.gc( + name_entry, backend._lib.X509_NAME_ENTRY_free + ) + res = backend._lib.X509_NAME_add_entry( + subject, name_entry, -1, set_flag) + backend.openssl_assert(res == 1) + set_flag = -1 + return subject + + +def _encode_name_gc(backend, attributes): + subject = _encode_name(backend, attributes) + subject = backend._ffi.gc(subject, backend._lib.X509_NAME_free) + return subject + + +def _encode_sk_name_entry(backend, attributes): + """ + The sk_X509_NAME_ENTRY created will not be gc'd. + """ + stack = backend._lib.sk_X509_NAME_ENTRY_new_null() + for attribute in attributes: + name_entry = _encode_name_entry(backend, attribute) + res = backend._lib.sk_X509_NAME_ENTRY_push(stack, name_entry) + backend.openssl_assert(res == 1) + return stack + + +def _encode_name_entry(backend, attribute): + if attribute._type is _ASN1Type.BMPString: + value = attribute.value.encode('utf_16_be') + else: + value = attribute.value.encode('utf8') + + obj = _txt2obj_gc(backend, attribute.oid.dotted_string) + + name_entry = backend._lib.X509_NAME_ENTRY_create_by_OBJ( + backend._ffi.NULL, obj, attribute._type.value, value, len(value) + ) + return name_entry + + +def _encode_crl_number_delta_crl_indicator(backend, ext): + return _encode_asn1_int_gc(backend, ext.crl_number) + + +def _encode_crl_reason(backend, crl_reason): + asn1enum = backend._lib.ASN1_ENUMERATED_new() + backend.openssl_assert(asn1enum != backend._ffi.NULL) + asn1enum = backend._ffi.gc(asn1enum, backend._lib.ASN1_ENUMERATED_free) + res = backend._lib.ASN1_ENUMERATED_set( + asn1enum, _CRL_ENTRY_REASON_ENUM_TO_CODE[crl_reason.reason] + ) + backend.openssl_assert(res == 1) + + return asn1enum + + +def _encode_invalidity_date(backend, invalidity_date): + time = backend._lib.ASN1_GENERALIZEDTIME_set( + backend._ffi.NULL, calendar.timegm( + invalidity_date.invalidity_date.timetuple() + ) + ) + backend.openssl_assert(time != backend._ffi.NULL) + time = backend._ffi.gc(time, backend._lib.ASN1_GENERALIZEDTIME_free) + + return time + + +def _encode_certificate_policies(backend, certificate_policies): + cp = backend._lib.sk_POLICYINFO_new_null() + backend.openssl_assert(cp != backend._ffi.NULL) + cp = backend._ffi.gc(cp, backend._lib.sk_POLICYINFO_free) + for policy_info in certificate_policies: + pi = backend._lib.POLICYINFO_new() + backend.openssl_assert(pi != backend._ffi.NULL) + res = backend._lib.sk_POLICYINFO_push(cp, pi) + backend.openssl_assert(res >= 1) + oid = _txt2obj(backend, policy_info.policy_identifier.dotted_string) + pi.policyid = oid + if policy_info.policy_qualifiers: + pqis = backend._lib.sk_POLICYQUALINFO_new_null() + 
backend.openssl_assert(pqis != backend._ffi.NULL) + for qualifier in policy_info.policy_qualifiers: + pqi = backend._lib.POLICYQUALINFO_new() + backend.openssl_assert(pqi != backend._ffi.NULL) + res = backend._lib.sk_POLICYQUALINFO_push(pqis, pqi) + backend.openssl_assert(res >= 1) + if isinstance(qualifier, six.text_type): + pqi.pqualid = _txt2obj( + backend, x509.OID_CPS_QUALIFIER.dotted_string + ) + pqi.d.cpsuri = _encode_asn1_str( + backend, + qualifier.encode("ascii"), + len(qualifier.encode("ascii")) + ) + else: + assert isinstance(qualifier, x509.UserNotice) + pqi.pqualid = _txt2obj( + backend, x509.OID_CPS_USER_NOTICE.dotted_string + ) + un = backend._lib.USERNOTICE_new() + backend.openssl_assert(un != backend._ffi.NULL) + pqi.d.usernotice = un + if qualifier.explicit_text: + un.exptext = _encode_asn1_utf8_str( + backend, qualifier.explicit_text + ) + + un.noticeref = _encode_notice_reference( + backend, qualifier.notice_reference + ) + + pi.qualifiers = pqis + + return cp + + +def _encode_notice_reference(backend, notice): + if notice is None: + return backend._ffi.NULL + else: + nr = backend._lib.NOTICEREF_new() + backend.openssl_assert(nr != backend._ffi.NULL) + # organization is a required field + nr.organization = _encode_asn1_utf8_str(backend, notice.organization) + + notice_stack = backend._lib.sk_ASN1_INTEGER_new_null() + nr.noticenos = notice_stack + for number in notice.notice_numbers: + num = _encode_asn1_int(backend, number) + res = backend._lib.sk_ASN1_INTEGER_push(notice_stack, num) + backend.openssl_assert(res >= 1) + + return nr + + +def _txt2obj(backend, name): + """ + Converts a Python string with an ASN.1 object ID in dotted form to a + ASN1_OBJECT. + """ + name = name.encode('ascii') + obj = backend._lib.OBJ_txt2obj(name, 1) + backend.openssl_assert(obj != backend._ffi.NULL) + return obj + + +def _txt2obj_gc(backend, name): + obj = _txt2obj(backend, name) + obj = backend._ffi.gc(obj, backend._lib.ASN1_OBJECT_free) + return obj + + +def _encode_ocsp_nocheck(backend, ext): + """ + The OCSP No Check extension is defined as a null ASN.1 value embedded in + an ASN.1 string. 
+ """ + return _encode_asn1_str_gc(backend, b"\x05\x00", 2) + + +def _encode_key_usage(backend, key_usage): + set_bit = backend._lib.ASN1_BIT_STRING_set_bit + ku = backend._lib.ASN1_BIT_STRING_new() + ku = backend._ffi.gc(ku, backend._lib.ASN1_BIT_STRING_free) + res = set_bit(ku, 0, key_usage.digital_signature) + backend.openssl_assert(res == 1) + res = set_bit(ku, 1, key_usage.content_commitment) + backend.openssl_assert(res == 1) + res = set_bit(ku, 2, key_usage.key_encipherment) + backend.openssl_assert(res == 1) + res = set_bit(ku, 3, key_usage.data_encipherment) + backend.openssl_assert(res == 1) + res = set_bit(ku, 4, key_usage.key_agreement) + backend.openssl_assert(res == 1) + res = set_bit(ku, 5, key_usage.key_cert_sign) + backend.openssl_assert(res == 1) + res = set_bit(ku, 6, key_usage.crl_sign) + backend.openssl_assert(res == 1) + if key_usage.key_agreement: + res = set_bit(ku, 7, key_usage.encipher_only) + backend.openssl_assert(res == 1) + res = set_bit(ku, 8, key_usage.decipher_only) + backend.openssl_assert(res == 1) + else: + res = set_bit(ku, 7, 0) + backend.openssl_assert(res == 1) + res = set_bit(ku, 8, 0) + backend.openssl_assert(res == 1) + + return ku + + +def _encode_authority_key_identifier(backend, authority_keyid): + akid = backend._lib.AUTHORITY_KEYID_new() + backend.openssl_assert(akid != backend._ffi.NULL) + akid = backend._ffi.gc(akid, backend._lib.AUTHORITY_KEYID_free) + if authority_keyid.key_identifier is not None: + akid.keyid = _encode_asn1_str( + backend, + authority_keyid.key_identifier, + len(authority_keyid.key_identifier) + ) + + if authority_keyid.authority_cert_issuer is not None: + akid.issuer = _encode_general_names( + backend, authority_keyid.authority_cert_issuer + ) + + if authority_keyid.authority_cert_serial_number is not None: + akid.serial = _encode_asn1_int( + backend, authority_keyid.authority_cert_serial_number + ) + + return akid + + +def _encode_basic_constraints(backend, basic_constraints): + constraints = backend._lib.BASIC_CONSTRAINTS_new() + constraints = backend._ffi.gc( + constraints, backend._lib.BASIC_CONSTRAINTS_free + ) + constraints.ca = 255 if basic_constraints.ca else 0 + if basic_constraints.ca and basic_constraints.path_length is not None: + constraints.pathlen = _encode_asn1_int( + backend, basic_constraints.path_length + ) + + return constraints + + +def _encode_authority_information_access(backend, authority_info_access): + aia = backend._lib.sk_ACCESS_DESCRIPTION_new_null() + backend.openssl_assert(aia != backend._ffi.NULL) + aia = backend._ffi.gc( + aia, backend._lib.sk_ACCESS_DESCRIPTION_free + ) + for access_description in authority_info_access: + ad = backend._lib.ACCESS_DESCRIPTION_new() + method = _txt2obj( + backend, access_description.access_method.dotted_string + ) + gn = _encode_general_name(backend, access_description.access_location) + ad.method = method + ad.location = gn + res = backend._lib.sk_ACCESS_DESCRIPTION_push(aia, ad) + backend.openssl_assert(res >= 1) + + return aia + + +def _encode_general_names(backend, names): + general_names = backend._lib.GENERAL_NAMES_new() + backend.openssl_assert(general_names != backend._ffi.NULL) + for name in names: + gn = _encode_general_name(backend, name) + res = backend._lib.sk_GENERAL_NAME_push(general_names, gn) + backend.openssl_assert(res != 0) + + return general_names + + +def _encode_alt_name(backend, san): + general_names = _encode_general_names(backend, san) + general_names = backend._ffi.gc( + general_names, backend._lib.GENERAL_NAMES_free + ) + 
return general_names + + +def _encode_subject_key_identifier(backend, ski): + return _encode_asn1_str_gc(backend, ski.digest, len(ski.digest)) + + +def _encode_general_name(backend, name): + if isinstance(name, x509.DNSName): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + gn.type = backend._lib.GEN_DNS + + ia5 = backend._lib.ASN1_IA5STRING_new() + backend.openssl_assert(ia5 != backend._ffi.NULL) + # ia5strings are supposed to be ITU T.50 but to allow round-tripping + # of broken certs that encode utf8 we'll encode utf8 here too. + value = name.value.encode("utf8") + + res = backend._lib.ASN1_STRING_set(ia5, value, len(value)) + backend.openssl_assert(res == 1) + gn.d.dNSName = ia5 + elif isinstance(name, x509.RegisteredID): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + gn.type = backend._lib.GEN_RID + obj = backend._lib.OBJ_txt2obj( + name.value.dotted_string.encode('ascii'), 1 + ) + backend.openssl_assert(obj != backend._ffi.NULL) + gn.d.registeredID = obj + elif isinstance(name, x509.DirectoryName): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + dir_name = _encode_name(backend, name.value) + gn.type = backend._lib.GEN_DIRNAME + gn.d.directoryName = dir_name + elif isinstance(name, x509.IPAddress): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + if isinstance(name.value, ipaddress.IPv4Network): + packed = ( + name.value.network_address.packed + + utils.int_to_bytes(((1 << 32) - name.value.num_addresses), 4) + ) + elif isinstance(name.value, ipaddress.IPv6Network): + packed = ( + name.value.network_address.packed + + utils.int_to_bytes((1 << 128) - name.value.num_addresses, 16) + ) + else: + packed = name.value.packed + ipaddr = _encode_asn1_str(backend, packed, len(packed)) + gn.type = backend._lib.GEN_IPADD + gn.d.iPAddress = ipaddr + elif isinstance(name, x509.OtherName): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + other_name = backend._lib.OTHERNAME_new() + backend.openssl_assert(other_name != backend._ffi.NULL) + + type_id = backend._lib.OBJ_txt2obj( + name.type_id.dotted_string.encode('ascii'), 1 + ) + backend.openssl_assert(type_id != backend._ffi.NULL) + data = backend._ffi.new("unsigned char[]", name.value) + data_ptr_ptr = backend._ffi.new("unsigned char **") + data_ptr_ptr[0] = data + value = backend._lib.d2i_ASN1_TYPE( + backend._ffi.NULL, data_ptr_ptr, len(name.value) + ) + if value == backend._ffi.NULL: + backend._consume_errors() + raise ValueError("Invalid ASN.1 data") + other_name.type_id = type_id + other_name.value = value + gn.type = backend._lib.GEN_OTHERNAME + gn.d.otherName = other_name + elif isinstance(name, x509.RFC822Name): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + # ia5strings are supposed to be ITU T.50 but to allow round-tripping + # of broken certs that encode utf8 we'll encode utf8 here too. + data = name.value.encode("utf8") + asn1_str = _encode_asn1_str(backend, data, len(data)) + gn.type = backend._lib.GEN_EMAIL + gn.d.rfc822Name = asn1_str + elif isinstance(name, x509.UniformResourceIdentifier): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + # ia5strings are supposed to be ITU T.50 but to allow round-tripping + # of broken certs that encode utf8 we'll encode utf8 here too. 
+ data = name.value.encode("utf8") + asn1_str = _encode_asn1_str(backend, data, len(data)) + gn.type = backend._lib.GEN_URI + gn.d.uniformResourceIdentifier = asn1_str + else: + raise ValueError( + "{0} is an unknown GeneralName type".format(name) + ) + + return gn + + +def _encode_extended_key_usage(backend, extended_key_usage): + eku = backend._lib.sk_ASN1_OBJECT_new_null() + eku = backend._ffi.gc(eku, backend._lib.sk_ASN1_OBJECT_free) + for oid in extended_key_usage: + obj = _txt2obj(backend, oid.dotted_string) + res = backend._lib.sk_ASN1_OBJECT_push(eku, obj) + backend.openssl_assert(res >= 1) + + return eku + + +_CRLREASONFLAGS = { + x509.ReasonFlags.key_compromise: 1, + x509.ReasonFlags.ca_compromise: 2, + x509.ReasonFlags.affiliation_changed: 3, + x509.ReasonFlags.superseded: 4, + x509.ReasonFlags.cessation_of_operation: 5, + x509.ReasonFlags.certificate_hold: 6, + x509.ReasonFlags.privilege_withdrawn: 7, + x509.ReasonFlags.aa_compromise: 8, +} + + +def _encode_cdps_freshest_crl(backend, cdps): + cdp = backend._lib.sk_DIST_POINT_new_null() + cdp = backend._ffi.gc(cdp, backend._lib.sk_DIST_POINT_free) + for point in cdps: + dp = backend._lib.DIST_POINT_new() + backend.openssl_assert(dp != backend._ffi.NULL) + + if point.reasons: + bitmask = backend._lib.ASN1_BIT_STRING_new() + backend.openssl_assert(bitmask != backend._ffi.NULL) + dp.reasons = bitmask + for reason in point.reasons: + res = backend._lib.ASN1_BIT_STRING_set_bit( + bitmask, _CRLREASONFLAGS[reason], 1 + ) + backend.openssl_assert(res == 1) + + if point.full_name: + dpn = backend._lib.DIST_POINT_NAME_new() + backend.openssl_assert(dpn != backend._ffi.NULL) + dpn.type = _DISTPOINT_TYPE_FULLNAME + dpn.name.fullname = _encode_general_names(backend, point.full_name) + dp.distpoint = dpn + + if point.relative_name: + dpn = backend._lib.DIST_POINT_NAME_new() + backend.openssl_assert(dpn != backend._ffi.NULL) + dpn.type = _DISTPOINT_TYPE_RELATIVENAME + relativename = _encode_sk_name_entry(backend, point.relative_name) + backend.openssl_assert(relativename != backend._ffi.NULL) + dpn.name.relativename = relativename + dp.distpoint = dpn + + if point.crl_issuer: + dp.CRLissuer = _encode_general_names(backend, point.crl_issuer) + + res = backend._lib.sk_DIST_POINT_push(cdp, dp) + backend.openssl_assert(res >= 1) + + return cdp + + +def _encode_name_constraints(backend, name_constraints): + nc = backend._lib.NAME_CONSTRAINTS_new() + backend.openssl_assert(nc != backend._ffi.NULL) + nc = backend._ffi.gc(nc, backend._lib.NAME_CONSTRAINTS_free) + permitted = _encode_general_subtree( + backend, name_constraints.permitted_subtrees + ) + nc.permittedSubtrees = permitted + excluded = _encode_general_subtree( + backend, name_constraints.excluded_subtrees + ) + nc.excludedSubtrees = excluded + + return nc + + +def _encode_policy_constraints(backend, policy_constraints): + pc = backend._lib.POLICY_CONSTRAINTS_new() + backend.openssl_assert(pc != backend._ffi.NULL) + pc = backend._ffi.gc(pc, backend._lib.POLICY_CONSTRAINTS_free) + if policy_constraints.require_explicit_policy is not None: + pc.requireExplicitPolicy = _encode_asn1_int( + backend, policy_constraints.require_explicit_policy + ) + + if policy_constraints.inhibit_policy_mapping is not None: + pc.inhibitPolicyMapping = _encode_asn1_int( + backend, policy_constraints.inhibit_policy_mapping + ) + + return pc + + +def _encode_general_subtree(backend, subtrees): + if subtrees is None: + return backend._ffi.NULL + else: + general_subtrees = backend._lib.sk_GENERAL_SUBTREE_new_null() + 
for name in subtrees: + gs = backend._lib.GENERAL_SUBTREE_new() + gs.base = _encode_general_name(backend, name) + res = backend._lib.sk_GENERAL_SUBTREE_push(general_subtrees, gs) + backend.openssl_assert(res >= 1) + + return general_subtrees + + +_EXTENSION_ENCODE_HANDLERS = { + ExtensionOID.BASIC_CONSTRAINTS: _encode_basic_constraints, + ExtensionOID.SUBJECT_KEY_IDENTIFIER: _encode_subject_key_identifier, + ExtensionOID.KEY_USAGE: _encode_key_usage, + ExtensionOID.SUBJECT_ALTERNATIVE_NAME: _encode_alt_name, + ExtensionOID.ISSUER_ALTERNATIVE_NAME: _encode_alt_name, + ExtensionOID.EXTENDED_KEY_USAGE: _encode_extended_key_usage, + ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _encode_authority_key_identifier, + ExtensionOID.CERTIFICATE_POLICIES: _encode_certificate_policies, + ExtensionOID.AUTHORITY_INFORMATION_ACCESS: ( + _encode_authority_information_access + ), + ExtensionOID.CRL_DISTRIBUTION_POINTS: _encode_cdps_freshest_crl, + ExtensionOID.FRESHEST_CRL: _encode_cdps_freshest_crl, + ExtensionOID.INHIBIT_ANY_POLICY: _encode_inhibit_any_policy, + ExtensionOID.OCSP_NO_CHECK: _encode_ocsp_nocheck, + ExtensionOID.NAME_CONSTRAINTS: _encode_name_constraints, + ExtensionOID.POLICY_CONSTRAINTS: _encode_policy_constraints, +} + +_CRL_EXTENSION_ENCODE_HANDLERS = { + ExtensionOID.ISSUER_ALTERNATIVE_NAME: _encode_alt_name, + ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _encode_authority_key_identifier, + ExtensionOID.AUTHORITY_INFORMATION_ACCESS: ( + _encode_authority_information_access + ), + ExtensionOID.CRL_NUMBER: _encode_crl_number_delta_crl_indicator, + ExtensionOID.DELTA_CRL_INDICATOR: _encode_crl_number_delta_crl_indicator, +} + +_CRL_ENTRY_EXTENSION_ENCODE_HANDLERS = { + CRLEntryExtensionOID.CERTIFICATE_ISSUER: _encode_alt_name, + CRLEntryExtensionOID.CRL_REASON: _encode_crl_reason, + CRLEntryExtensionOID.INVALIDITY_DATE: _encode_invalidity_date, +} diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/hashes.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/hashes.py new file mode 100644 index 0000000..92ea53b --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/hashes.py @@ -0,0 +1,61 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
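+# Usage sketch (illustrative only): the _HashContext below is the engine
+# behind the public hashes.Hash interface:
+#
+#     from cryptography.hazmat.backends import default_backend
+#     from cryptography.hazmat.primitives import hashes
+#
+#     digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
+#     digest.update(b"abc")
+#     digest.update(b"def")
+#     result = digest.finalize()  # 32-byte SHA-256 digest of b"abcdef"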
+ +from __future__ import absolute_import, division, print_function + + +from cryptography import utils +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives import hashes + + +@utils.register_interface(hashes.HashContext) +class _HashContext(object): + def __init__(self, backend, algorithm, ctx=None): + self._algorithm = algorithm + + self._backend = backend + + if ctx is None: + ctx = self._backend._lib.Cryptography_EVP_MD_CTX_new() + ctx = self._backend._ffi.gc( + ctx, self._backend._lib.Cryptography_EVP_MD_CTX_free + ) + name = self._backend._build_openssl_digest_name(algorithm) + evp_md = self._backend._lib.EVP_get_digestbyname(name) + if evp_md == self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "{0} is not a supported hash on this backend.".format( + name), + _Reasons.UNSUPPORTED_HASH + ) + res = self._backend._lib.EVP_DigestInit_ex(ctx, evp_md, + self._backend._ffi.NULL) + self._backend.openssl_assert(res != 0) + + self._ctx = ctx + + algorithm = utils.read_only_property("_algorithm") + + def copy(self): + copied_ctx = self._backend._lib.Cryptography_EVP_MD_CTX_new() + copied_ctx = self._backend._ffi.gc( + copied_ctx, self._backend._lib.Cryptography_EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_MD_CTX_copy_ex(copied_ctx, self._ctx) + self._backend.openssl_assert(res != 0) + return _HashContext(self._backend, self.algorithm, ctx=copied_ctx) + + def update(self, data): + res = self._backend._lib.EVP_DigestUpdate(self._ctx, data, len(data)) + self._backend.openssl_assert(res != 0) + + def finalize(self): + buf = self._backend._ffi.new("unsigned char[]", + self._backend._lib.EVP_MAX_MD_SIZE) + outlen = self._backend._ffi.new("unsigned int *") + res = self._backend._lib.EVP_DigestFinal_ex(self._ctx, buf, outlen) + self._backend.openssl_assert(res != 0) + self._backend.openssl_assert(outlen[0] == self.algorithm.digest_size) + return self._backend._ffi.buffer(buf)[:outlen[0]] diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/hmac.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/hmac.py new file mode 100644 index 0000000..3577f47 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/hmac.py @@ -0,0 +1,73 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
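
Editor's note: the HMAC wrapper that follows mirrors the hash context above but keys the digest and adds constant-time verification. A small usage sketch against the public `hmac` module; the all-zero key is a placeholder, not a recommendation:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, hmac

key = b"\x00" * 32  # placeholder; use a randomly generated secret key
h = hmac.HMAC(key, hashes.SHA256(), backend=default_backend())
h.update(b"message to authenticate")
tag = h.finalize()

# verify() recomputes the tag and compares in constant time; a mismatch
# raises InvalidSignature rather than returning False.
v = hmac.HMAC(key, hashes.SHA256(), backend=default_backend())
v.update(b"message to authenticate")
v.verify(tag)
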
+ +from __future__ import absolute_import, division, print_function + + +from cryptography import utils +from cryptography.exceptions import ( + InvalidSignature, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.primitives import constant_time, hashes, mac + + +@utils.register_interface(mac.MACContext) +@utils.register_interface(hashes.HashContext) +class _HMACContext(object): + def __init__(self, backend, key, algorithm, ctx=None): + self._algorithm = algorithm + self._backend = backend + + if ctx is None: + ctx = self._backend._lib.Cryptography_HMAC_CTX_new() + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + ctx = self._backend._ffi.gc( + ctx, self._backend._lib.Cryptography_HMAC_CTX_free + ) + name = self._backend._build_openssl_digest_name(algorithm) + evp_md = self._backend._lib.EVP_get_digestbyname(name) + if evp_md == self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "{0} is not a supported hash on this backend".format(name), + _Reasons.UNSUPPORTED_HASH + ) + res = self._backend._lib.HMAC_Init_ex( + ctx, key, len(key), evp_md, self._backend._ffi.NULL + ) + self._backend.openssl_assert(res != 0) + + self._ctx = ctx + self._key = key + + algorithm = utils.read_only_property("_algorithm") + + def copy(self): + copied_ctx = self._backend._lib.Cryptography_HMAC_CTX_new() + self._backend.openssl_assert(copied_ctx != self._backend._ffi.NULL) + copied_ctx = self._backend._ffi.gc( + copied_ctx, self._backend._lib.Cryptography_HMAC_CTX_free + ) + res = self._backend._lib.HMAC_CTX_copy(copied_ctx, self._ctx) + self._backend.openssl_assert(res != 0) + return _HMACContext( + self._backend, self._key, self.algorithm, ctx=copied_ctx + ) + + def update(self, data): + res = self._backend._lib.HMAC_Update(self._ctx, data, len(data)) + self._backend.openssl_assert(res != 0) + + def finalize(self): + buf = self._backend._ffi.new("unsigned char[]", + self._backend._lib.EVP_MAX_MD_SIZE) + outlen = self._backend._ffi.new("unsigned int *") + res = self._backend._lib.HMAC_Final(self._ctx, buf, outlen) + self._backend.openssl_assert(res != 0) + self._backend.openssl_assert(outlen[0] == self.algorithm.digest_size) + return self._backend._ffi.buffer(buf)[:outlen[0]] + + def verify(self, signature): + digest = self.finalize() + if not constant_time.bytes_eq(digest, signature): + raise InvalidSignature("Signature did not match digest.") diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/rsa.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/rsa.py new file mode 100644 index 0000000..9a7bfaa --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/rsa.py @@ -0,0 +1,475 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
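
Editor's note: the RSA backend below handles padding selection, PSS salt-length calculation, and the sign/verify and encrypt/decrypt paths over EVP_PKEY. As a reference point, a hedged sketch of the public API calls these helpers ultimately serve:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

private_key = rsa.generate_private_key(
    public_exponent=65537, key_size=2048, backend=default_backend()
)
public_key = private_key.public_key()

# PSS.MAX_LENGTH feeds the salt-length calculation performed below.
pss = padding.PSS(
    mgf=padding.MGF1(hashes.SHA256()),
    salt_length=padding.PSS.MAX_LENGTH,
)
signature = private_key.sign(b"data to sign", pss, hashes.SHA256())
public_key.verify(signature, b"data to sign", pss, hashes.SHA256())

# OAEP encryption exercises _enc_dec_rsa and the OAEP MD/label branches.
oaep = padding.OAEP(
    mgf=padding.MGF1(algorithm=hashes.SHA256()),
    algorithm=hashes.SHA256(),
    label=None,
)
ciphertext = public_key.encrypt(b"secret", oaep)
assert private_key.decrypt(ciphertext, oaep) == b"secret"
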
+ +from __future__ import absolute_import, division, print_function + +import math + +from cryptography import utils +from cryptography.exceptions import ( + InvalidSignature, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.openssl.utils import ( + _calculate_digest_and_algorithm, _check_not_prehashed, + _warn_sign_verify_deprecated +) +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.asymmetric import ( + AsymmetricSignatureContext, AsymmetricVerificationContext, rsa +) +from cryptography.hazmat.primitives.asymmetric.padding import ( + AsymmetricPadding, MGF1, OAEP, PKCS1v15, PSS, calculate_max_pss_salt_length +) +from cryptography.hazmat.primitives.asymmetric.rsa import ( + RSAPrivateKeyWithSerialization, RSAPublicKeyWithSerialization +) + + +def _get_rsa_pss_salt_length(pss, key, hash_algorithm): + salt = pss._salt_length + + if salt is MGF1.MAX_LENGTH or salt is PSS.MAX_LENGTH: + return calculate_max_pss_salt_length(key, hash_algorithm) + else: + return salt + + +def _enc_dec_rsa(backend, key, data, padding): + if not isinstance(padding, AsymmetricPadding): + raise TypeError("Padding must be an instance of AsymmetricPadding.") + + if isinstance(padding, PKCS1v15): + padding_enum = backend._lib.RSA_PKCS1_PADDING + elif isinstance(padding, OAEP): + padding_enum = backend._lib.RSA_PKCS1_OAEP_PADDING + + if not isinstance(padding._mgf, MGF1): + raise UnsupportedAlgorithm( + "Only MGF1 is supported by this backend.", + _Reasons.UNSUPPORTED_MGF + ) + + if not backend.rsa_padding_supported(padding): + raise UnsupportedAlgorithm( + "This combination of padding and hash algorithm is not " + "supported by this backend.", + _Reasons.UNSUPPORTED_PADDING + ) + + else: + raise UnsupportedAlgorithm( + "{0} is not supported by this backend.".format( + padding.name + ), + _Reasons.UNSUPPORTED_PADDING + ) + + return _enc_dec_rsa_pkey_ctx(backend, key, data, padding_enum, padding) + + +def _enc_dec_rsa_pkey_ctx(backend, key, data, padding_enum, padding): + if isinstance(key, _RSAPublicKey): + init = backend._lib.EVP_PKEY_encrypt_init + crypt = backend._lib.EVP_PKEY_encrypt + else: + init = backend._lib.EVP_PKEY_decrypt_init + crypt = backend._lib.EVP_PKEY_decrypt + + pkey_ctx = backend._lib.EVP_PKEY_CTX_new( + key._evp_pkey, backend._ffi.NULL + ) + backend.openssl_assert(pkey_ctx != backend._ffi.NULL) + pkey_ctx = backend._ffi.gc(pkey_ctx, backend._lib.EVP_PKEY_CTX_free) + res = init(pkey_ctx) + backend.openssl_assert(res == 1) + res = backend._lib.EVP_PKEY_CTX_set_rsa_padding( + pkey_ctx, padding_enum) + backend.openssl_assert(res > 0) + buf_size = backend._lib.EVP_PKEY_size(key._evp_pkey) + backend.openssl_assert(buf_size > 0) + if ( + isinstance(padding, OAEP) and + backend._lib.Cryptography_HAS_RSA_OAEP_MD + ): + mgf1_md = backend._lib.EVP_get_digestbyname( + padding._mgf._algorithm.name.encode("ascii")) + backend.openssl_assert(mgf1_md != backend._ffi.NULL) + res = backend._lib.EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf1_md) + backend.openssl_assert(res > 0) + oaep_md = backend._lib.EVP_get_digestbyname( + padding._algorithm.name.encode("ascii")) + backend.openssl_assert(oaep_md != backend._ffi.NULL) + res = backend._lib.EVP_PKEY_CTX_set_rsa_oaep_md(pkey_ctx, oaep_md) + backend.openssl_assert(res > 0) + + if ( + isinstance(padding, OAEP) and + padding._label is not None and + len(padding._label) > 0 + ): + # set0_rsa_oaep_label takes ownership of the char * so we need to + # copy it into some new memory + labelptr = 
backend._lib.OPENSSL_malloc(len(padding._label)) + backend.openssl_assert(labelptr != backend._ffi.NULL) + backend._ffi.memmove(labelptr, padding._label, len(padding._label)) + res = backend._lib.EVP_PKEY_CTX_set0_rsa_oaep_label( + pkey_ctx, labelptr, len(padding._label) + ) + backend.openssl_assert(res == 1) + + outlen = backend._ffi.new("size_t *", buf_size) + buf = backend._ffi.new("unsigned char[]", buf_size) + res = crypt(pkey_ctx, buf, outlen, data, len(data)) + if res <= 0: + _handle_rsa_enc_dec_error(backend, key) + + return backend._ffi.buffer(buf)[:outlen[0]] + + +def _handle_rsa_enc_dec_error(backend, key): + errors = backend._consume_errors() + backend.openssl_assert(errors) + assert errors[0].lib == backend._lib.ERR_LIB_RSA + if isinstance(key, _RSAPublicKey): + assert (errors[0].reason == + backend._lib.RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE) + raise ValueError( + "Data too long for key size. Encrypt less data or use a " + "larger key size." + ) + else: + decoding_errors = [ + backend._lib.RSA_R_BLOCK_TYPE_IS_NOT_01, + backend._lib.RSA_R_BLOCK_TYPE_IS_NOT_02, + backend._lib.RSA_R_OAEP_DECODING_ERROR, + # Though this error looks similar to the + # RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE, this occurs on decrypts, + # rather than on encrypts + backend._lib.RSA_R_DATA_TOO_LARGE_FOR_MODULUS, + ] + if backend._lib.Cryptography_HAS_RSA_R_PKCS_DECODING_ERROR: + decoding_errors.append(backend._lib.RSA_R_PKCS_DECODING_ERROR) + + assert errors[0].reason in decoding_errors + raise ValueError("Decryption failed.") + + +def _rsa_sig_determine_padding(backend, key, padding, algorithm): + if not isinstance(padding, AsymmetricPadding): + raise TypeError("Expected provider of AsymmetricPadding.") + + pkey_size = backend._lib.EVP_PKEY_size(key._evp_pkey) + backend.openssl_assert(pkey_size > 0) + + if isinstance(padding, PKCS1v15): + padding_enum = backend._lib.RSA_PKCS1_PADDING + elif isinstance(padding, PSS): + if not isinstance(padding._mgf, MGF1): + raise UnsupportedAlgorithm( + "Only MGF1 is supported by this backend.", + _Reasons.UNSUPPORTED_MGF + ) + + # Size of key in bytes - 2 is the maximum + # PSS signature length (salt length is checked later) + if pkey_size - algorithm.digest_size - 2 < 0: + raise ValueError("Digest too large for key size. 
Use a larger " + "key or different digest.") + + padding_enum = backend._lib.RSA_PKCS1_PSS_PADDING + else: + raise UnsupportedAlgorithm( + "{0} is not supported by this backend.".format(padding.name), + _Reasons.UNSUPPORTED_PADDING + ) + + return padding_enum + + +def _rsa_sig_setup(backend, padding, algorithm, key, data, init_func): + padding_enum = _rsa_sig_determine_padding(backend, key, padding, algorithm) + evp_md = backend._lib.EVP_get_digestbyname(algorithm.name.encode("ascii")) + backend.openssl_assert(evp_md != backend._ffi.NULL) + pkey_ctx = backend._lib.EVP_PKEY_CTX_new(key._evp_pkey, backend._ffi.NULL) + backend.openssl_assert(pkey_ctx != backend._ffi.NULL) + pkey_ctx = backend._ffi.gc(pkey_ctx, backend._lib.EVP_PKEY_CTX_free) + res = init_func(pkey_ctx) + backend.openssl_assert(res == 1) + res = backend._lib.EVP_PKEY_CTX_set_signature_md(pkey_ctx, evp_md) + backend.openssl_assert(res > 0) + res = backend._lib.EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, padding_enum) + backend.openssl_assert(res > 0) + if isinstance(padding, PSS): + res = backend._lib.EVP_PKEY_CTX_set_rsa_pss_saltlen( + pkey_ctx, _get_rsa_pss_salt_length(padding, key, algorithm) + ) + backend.openssl_assert(res > 0) + + mgf1_md = backend._lib.EVP_get_digestbyname( + padding._mgf._algorithm.name.encode("ascii") + ) + backend.openssl_assert(mgf1_md != backend._ffi.NULL) + res = backend._lib.EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf1_md) + backend.openssl_assert(res > 0) + + return pkey_ctx + + +def _rsa_sig_sign(backend, padding, algorithm, private_key, data): + pkey_ctx = _rsa_sig_setup( + backend, padding, algorithm, private_key, data, + backend._lib.EVP_PKEY_sign_init + ) + buflen = backend._ffi.new("size_t *") + res = backend._lib.EVP_PKEY_sign( + pkey_ctx, + backend._ffi.NULL, + buflen, + data, + len(data) + ) + backend.openssl_assert(res == 1) + buf = backend._ffi.new("unsigned char[]", buflen[0]) + res = backend._lib.EVP_PKEY_sign( + pkey_ctx, buf, buflen, data, len(data)) + if res != 1: + errors = backend._consume_errors() + assert errors[0].lib == backend._lib.ERR_LIB_RSA + reason = None + if (errors[0].reason == + backend._lib.RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE): + reason = ("Salt length too long for key size. Try using " + "MAX_LENGTH instead.") + else: + assert (errors[0].reason == + backend._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY) + reason = "Digest too large for key size. Use a larger key." + assert reason is not None + raise ValueError(reason) + + return backend._ffi.buffer(buf)[:] + + +def _rsa_sig_verify(backend, padding, algorithm, public_key, signature, data): + pkey_ctx = _rsa_sig_setup( + backend, padding, algorithm, public_key, data, + backend._lib.EVP_PKEY_verify_init + ) + res = backend._lib.EVP_PKEY_verify( + pkey_ctx, signature, len(signature), data, len(data) + ) + # The previous call can return negative numbers in the event of an + # error. This is not a signature failure but we need to fail if it + # occurs. + backend.openssl_assert(res >= 0) + if res == 0: + backend._consume_errors() + raise InvalidSignature + + +@utils.register_interface(AsymmetricSignatureContext) +class _RSASignatureContext(object): + def __init__(self, backend, private_key, padding, algorithm): + self._backend = backend + self._private_key = private_key + + # We now call _rsa_sig_determine_padding in _rsa_sig_setup. However + # we need to make a pointless call to it here so we maintain the + # API of erroring on init with this context if the values are invalid. 
+ _rsa_sig_determine_padding(backend, private_key, padding, algorithm) + self._padding = padding + self._algorithm = algorithm + self._hash_ctx = hashes.Hash(self._algorithm, self._backend) + + def update(self, data): + self._hash_ctx.update(data) + + def finalize(self): + return _rsa_sig_sign( + self._backend, + self._padding, + self._algorithm, + self._private_key, + self._hash_ctx.finalize() + ) + + +@utils.register_interface(AsymmetricVerificationContext) +class _RSAVerificationContext(object): + def __init__(self, backend, public_key, signature, padding, algorithm): + self._backend = backend + self._public_key = public_key + self._signature = signature + self._padding = padding + # We now call _rsa_sig_determine_padding in _rsa_sig_setup. However + # we need to make a pointless call to it here so we maintain the + # API of erroring on init with this context if the values are invalid. + _rsa_sig_determine_padding(backend, public_key, padding, algorithm) + + padding = padding + self._algorithm = algorithm + self._hash_ctx = hashes.Hash(self._algorithm, self._backend) + + def update(self, data): + self._hash_ctx.update(data) + + def verify(self): + return _rsa_sig_verify( + self._backend, + self._padding, + self._algorithm, + self._public_key, + self._signature, + self._hash_ctx.finalize() + ) + + +@utils.register_interface(RSAPrivateKeyWithSerialization) +class _RSAPrivateKey(object): + def __init__(self, backend, rsa_cdata, evp_pkey): + self._backend = backend + self._rsa_cdata = rsa_cdata + self._evp_pkey = evp_pkey + + n = self._backend._ffi.new("BIGNUM **") + self._backend._lib.RSA_get0_key( + self._rsa_cdata, n, self._backend._ffi.NULL, + self._backend._ffi.NULL + ) + self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) + self._key_size = self._backend._lib.BN_num_bits(n[0]) + + key_size = utils.read_only_property("_key_size") + + def signer(self, padding, algorithm): + _warn_sign_verify_deprecated() + _check_not_prehashed(algorithm) + return _RSASignatureContext(self._backend, self, padding, algorithm) + + def decrypt(self, ciphertext, padding): + key_size_bytes = int(math.ceil(self.key_size / 8.0)) + if key_size_bytes != len(ciphertext): + raise ValueError("Ciphertext length must be equal to key size.") + + return _enc_dec_rsa(self._backend, self, ciphertext, padding) + + def public_key(self): + ctx = self._backend._lib.RSAPublicKey_dup(self._rsa_cdata) + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + ctx = self._backend._ffi.gc(ctx, self._backend._lib.RSA_free) + res = self._backend._lib.RSA_blinding_on(ctx, self._backend._ffi.NULL) + self._backend.openssl_assert(res == 1) + evp_pkey = self._backend._rsa_cdata_to_evp_pkey(ctx) + return _RSAPublicKey(self._backend, ctx, evp_pkey) + + def private_numbers(self): + n = self._backend._ffi.new("BIGNUM **") + e = self._backend._ffi.new("BIGNUM **") + d = self._backend._ffi.new("BIGNUM **") + p = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + dmp1 = self._backend._ffi.new("BIGNUM **") + dmq1 = self._backend._ffi.new("BIGNUM **") + iqmp = self._backend._ffi.new("BIGNUM **") + self._backend._lib.RSA_get0_key(self._rsa_cdata, n, e, d) + self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(e[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(d[0] != self._backend._ffi.NULL) + self._backend._lib.RSA_get0_factors(self._rsa_cdata, p, q) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(q[0] 
!= self._backend._ffi.NULL) + self._backend._lib.RSA_get0_crt_params( + self._rsa_cdata, dmp1, dmq1, iqmp + ) + self._backend.openssl_assert(dmp1[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(dmq1[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(iqmp[0] != self._backend._ffi.NULL) + return rsa.RSAPrivateNumbers( + p=self._backend._bn_to_int(p[0]), + q=self._backend._bn_to_int(q[0]), + d=self._backend._bn_to_int(d[0]), + dmp1=self._backend._bn_to_int(dmp1[0]), + dmq1=self._backend._bn_to_int(dmq1[0]), + iqmp=self._backend._bn_to_int(iqmp[0]), + public_numbers=rsa.RSAPublicNumbers( + e=self._backend._bn_to_int(e[0]), + n=self._backend._bn_to_int(n[0]), + ) + ) + + def private_bytes(self, encoding, format, encryption_algorithm): + return self._backend._private_key_bytes( + encoding, + format, + encryption_algorithm, + self._evp_pkey, + self._rsa_cdata + ) + + def sign(self, data, padding, algorithm): + data, algorithm = _calculate_digest_and_algorithm( + self._backend, data, algorithm + ) + return _rsa_sig_sign(self._backend, padding, algorithm, self, data) + + +@utils.register_interface(RSAPublicKeyWithSerialization) +class _RSAPublicKey(object): + def __init__(self, backend, rsa_cdata, evp_pkey): + self._backend = backend + self._rsa_cdata = rsa_cdata + self._evp_pkey = evp_pkey + + n = self._backend._ffi.new("BIGNUM **") + self._backend._lib.RSA_get0_key( + self._rsa_cdata, n, self._backend._ffi.NULL, + self._backend._ffi.NULL + ) + self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) + self._key_size = self._backend._lib.BN_num_bits(n[0]) + + key_size = utils.read_only_property("_key_size") + + def verifier(self, signature, padding, algorithm): + _warn_sign_verify_deprecated() + if not isinstance(signature, bytes): + raise TypeError("signature must be bytes.") + + _check_not_prehashed(algorithm) + return _RSAVerificationContext( + self._backend, self, signature, padding, algorithm + ) + + def encrypt(self, plaintext, padding): + return _enc_dec_rsa(self._backend, self, plaintext, padding) + + def public_numbers(self): + n = self._backend._ffi.new("BIGNUM **") + e = self._backend._ffi.new("BIGNUM **") + self._backend._lib.RSA_get0_key( + self._rsa_cdata, n, e, self._backend._ffi.NULL + ) + self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(e[0] != self._backend._ffi.NULL) + return rsa.RSAPublicNumbers( + e=self._backend._bn_to_int(e[0]), + n=self._backend._bn_to_int(n[0]), + ) + + def public_bytes(self, encoding, format): + return self._backend._public_key_bytes( + encoding, + format, + self, + self._evp_pkey, + self._rsa_cdata + ) + + def verify(self, signature, data, padding, algorithm): + data, algorithm = _calculate_digest_and_algorithm( + self._backend, data, algorithm + ) + return _rsa_sig_verify( + self._backend, padding, algorithm, self, signature, data + ) diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/utils.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/utils.py new file mode 100644 index 0000000..05d0fe5 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/utils.py @@ -0,0 +1,45 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
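
Editor's note: the utils module that follows implements the Prehashed dispatch used by the sign/verify methods above. When the caller hashes out of band, `_calculate_digest_and_algorithm` validates the digest length instead of hashing again. A minimal sketch of that path, under the same API assumptions as the earlier examples:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed

private_key = rsa.generate_private_key(
    public_exponent=65537, key_size=2048, backend=default_backend()
)

# Hash the message out of band ...
h = hashes.Hash(hashes.SHA256(), backend=default_backend())
h.update(b"a large message hashed elsewhere")
digest = h.finalize()

# ... then sign the digest directly; Prehashed tells the backend to skip
# its own hashing step and only check the digest size.
signature = private_key.sign(
    digest, padding.PKCS1v15(), Prehashed(hashes.SHA256())
)
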
+ +from __future__ import absolute_import, division, print_function + +import warnings + +from cryptography import utils +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.asymmetric.utils import Prehashed + + +def _calculate_digest_and_algorithm(backend, data, algorithm): + if not isinstance(algorithm, Prehashed): + hash_ctx = hashes.Hash(algorithm, backend) + hash_ctx.update(data) + data = hash_ctx.finalize() + else: + algorithm = algorithm._algorithm + + if len(data) != algorithm.digest_size: + raise ValueError( + "The provided data must be the same length as the hash " + "algorithm's digest size." + ) + + return (data, algorithm) + + +def _check_not_prehashed(signature_algorithm): + if isinstance(signature_algorithm, Prehashed): + raise TypeError( + "Prehashed is only supported in the sign and verify methods. " + "It cannot be used with signer or verifier." + ) + + +def _warn_sign_verify_deprecated(): + warnings.warn( + "signer and verifier have been deprecated. Please use sign " + "and verify instead.", + utils.PersistentlyDeprecated, + stacklevel=3 + ) diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/x25519.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/x25519.py new file mode 100644 index 0000000..983ece6 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/x25519.py @@ -0,0 +1,79 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.hazmat.primitives.asymmetric.x25519 import ( + X25519PrivateKey, X25519PublicKey +) + + +@utils.register_interface(X25519PublicKey) +class _X25519PublicKey(object): + def __init__(self, backend, evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_bytes(self): + ucharpp = self._backend._ffi.new("unsigned char **") + res = self._backend._lib.EVP_PKEY_get1_tls_encodedpoint( + self._evp_pkey, ucharpp + ) + self._backend.openssl_assert(res == 32) + self._backend.openssl_assert(ucharpp[0] != self._backend._ffi.NULL) + data = self._backend._ffi.gc( + ucharpp[0], self._backend._lib.OPENSSL_free + ) + return self._backend._ffi.buffer(data, res)[:] + + +@utils.register_interface(X25519PrivateKey) +class _X25519PrivateKey(object): + def __init__(self, backend, evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_key(self): + bio = self._backend._create_mem_bio_gc() + res = self._backend._lib.i2d_PUBKEY_bio(bio, self._evp_pkey) + self._backend.openssl_assert(res == 1) + evp_pkey = self._backend._lib.d2i_PUBKEY_bio( + bio, self._backend._ffi.NULL + ) + self._backend.openssl_assert(evp_pkey != self._backend._ffi.NULL) + evp_pkey = self._backend._ffi.gc( + evp_pkey, self._backend._lib.EVP_PKEY_free + ) + return _X25519PublicKey(self._backend, evp_pkey) + + def exchange(self, peer_public_key): + if not isinstance(peer_public_key, X25519PublicKey): + raise TypeError("peer_public_key must be X25519PublicKey.") + + ctx = self._backend._lib.EVP_PKEY_CTX_new( + self._evp_pkey, self._backend._ffi.NULL + ) + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + ctx = self._backend._ffi.gc(ctx, self._backend._lib.EVP_PKEY_CTX_free) + res = self._backend._lib.EVP_PKEY_derive_init(ctx) + self._backend.openssl_assert(res == 1) 
+        res = self._backend._lib.EVP_PKEY_derive_set_peer(
+            ctx, peer_public_key._evp_pkey
+        )
+        self._backend.openssl_assert(res == 1)
+        keylen = self._backend._ffi.new("size_t *")
+        res = self._backend._lib.EVP_PKEY_derive(
+            ctx, self._backend._ffi.NULL, keylen
+        )
+        self._backend.openssl_assert(res == 1)
+        self._backend.openssl_assert(keylen[0] > 0)
+        buf = self._backend._ffi.new("unsigned char[]", keylen[0])
+        res = self._backend._lib.EVP_PKEY_derive(ctx, buf, keylen)
+        if res != 1:
+            raise ValueError(
+                "Null shared key derived from public/private pair."
+            )
+
+        return self._backend._ffi.buffer(buf, keylen[0])[:]
diff --git a/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/x509.py b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/x509.py
new file mode 100644
index 0000000..b870eeb
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/backends/openssl/x509.py
@@ -0,0 +1,518 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import datetime
+import operator
+import warnings
+
+from cryptography import utils, x509
+from cryptography.exceptions import UnsupportedAlgorithm
+from cryptography.hazmat.backends.openssl.decode_asn1 import (
+    _CERTIFICATE_EXTENSION_PARSER, _CERTIFICATE_EXTENSION_PARSER_NO_SCT,
+    _CRL_EXTENSION_PARSER, _CSR_EXTENSION_PARSER,
+    _REVOKED_CERTIFICATE_EXTENSION_PARSER, _asn1_integer_to_int,
+    _asn1_string_to_bytes, _decode_x509_name, _obj2txt, _parse_asn1_time
+)
+from cryptography.hazmat.backends.openssl.encode_asn1 import (
+    _encode_asn1_int_gc
+)
+from cryptography.hazmat.primitives import hashes, serialization
+from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
+
+
+@utils.register_interface(x509.Certificate)
+class _Certificate(object):
+    def __init__(self, backend, x509):
+        self._backend = backend
+        self._x509 = x509
+
+    def __repr__(self):
+        return "<Certificate(subject={0}, ...)>".format(self.subject)
+
+    def __eq__(self, other):
+        if not isinstance(other, x509.Certificate):
+            return NotImplemented
+
+        res = self._backend._lib.X509_cmp(self._x509, other._x509)
+        return res == 0
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash(self.public_bytes(serialization.Encoding.DER))
+
+    def fingerprint(self, algorithm):
+        h = hashes.Hash(algorithm, self._backend)
+        h.update(self.public_bytes(serialization.Encoding.DER))
+        return h.finalize()
+
+    @property
+    def version(self):
+        version = self._backend._lib.X509_get_version(self._x509)
+        if version == 0:
+            return x509.Version.v1
+        elif version == 2:
+            return x509.Version.v3
+        else:
+            raise x509.InvalidVersion(
+                "{0} is not a valid X509 version".format(version), version
+            )
+
+    @property
+    def serial(self):
+        warnings.warn(
+            "Certificate serial is deprecated, use serial_number instead.",
+            utils.PersistentlyDeprecated,
+            stacklevel=2
+        )
+        return self.serial_number
+
+    @property
+    def serial_number(self):
+        asn1_int = self._backend._lib.X509_get_serialNumber(self._x509)
+        self._backend.openssl_assert(asn1_int != self._backend._ffi.NULL)
+        return _asn1_integer_to_int(self._backend, asn1_int)
+
+    def public_key(self):
+        pkey = self._backend._lib.X509_get_pubkey(self._x509)
+        if pkey == self._backend._ffi.NULL:
+            # Remove errors from the stack.
+ self._backend._consume_errors() + raise ValueError("Certificate public key is of an unknown type") + + pkey = self._backend._ffi.gc(pkey, self._backend._lib.EVP_PKEY_free) + + return self._backend._evp_pkey_to_public_key(pkey) + + @property + def not_valid_before(self): + asn1_time = self._backend._lib.X509_get_notBefore(self._x509) + return _parse_asn1_time(self._backend, asn1_time) + + @property + def not_valid_after(self): + asn1_time = self._backend._lib.X509_get_notAfter(self._x509) + return _parse_asn1_time(self._backend, asn1_time) + + @property + def issuer(self): + issuer = self._backend._lib.X509_get_issuer_name(self._x509) + self._backend.openssl_assert(issuer != self._backend._ffi.NULL) + return _decode_x509_name(self._backend, issuer) + + @property + def subject(self): + subject = self._backend._lib.X509_get_subject_name(self._x509) + self._backend.openssl_assert(subject != self._backend._ffi.NULL) + return _decode_x509_name(self._backend, subject) + + @property + def signature_hash_algorithm(self): + oid = self.signature_algorithm_oid + try: + return x509._SIG_OIDS_TO_HASH[oid] + except KeyError: + raise UnsupportedAlgorithm( + "Signature algorithm OID:{0} not recognized".format(oid) + ) + + @property + def signature_algorithm_oid(self): + alg = self._backend._ffi.new("X509_ALGOR **") + self._backend._lib.X509_get0_signature( + self._backend._ffi.NULL, alg, self._x509 + ) + self._backend.openssl_assert(alg[0] != self._backend._ffi.NULL) + oid = _obj2txt(self._backend, alg[0].algorithm) + return x509.ObjectIdentifier(oid) + + @utils.cached_property + def extensions(self): + if self._backend._lib.CRYPTOGRAPHY_OPENSSL_110_OR_GREATER: + return _CERTIFICATE_EXTENSION_PARSER.parse( + self._backend, self._x509 + ) + else: + return _CERTIFICATE_EXTENSION_PARSER_NO_SCT.parse( + self._backend, self._x509 + ) + + @property + def signature(self): + sig = self._backend._ffi.new("ASN1_BIT_STRING **") + self._backend._lib.X509_get0_signature( + sig, self._backend._ffi.NULL, self._x509 + ) + self._backend.openssl_assert(sig[0] != self._backend._ffi.NULL) + return _asn1_string_to_bytes(self._backend, sig[0]) + + @property + def tbs_certificate_bytes(self): + pp = self._backend._ffi.new("unsigned char **") + res = self._backend._lib.i2d_re_X509_tbs(self._x509, pp) + self._backend.openssl_assert(res > 0) + pp = self._backend._ffi.gc( + pp, lambda pointer: self._backend._lib.OPENSSL_free(pointer[0]) + ) + return self._backend._ffi.buffer(pp[0], res)[:] + + def public_bytes(self, encoding): + bio = self._backend._create_mem_bio_gc() + if encoding is serialization.Encoding.PEM: + res = self._backend._lib.PEM_write_bio_X509(bio, self._x509) + elif encoding is serialization.Encoding.DER: + res = self._backend._lib.i2d_X509_bio(bio, self._x509) + else: + raise TypeError("encoding must be an item from the Encoding enum") + + self._backend.openssl_assert(res == 1) + return self._backend._read_mem_bio(bio) + + +@utils.register_interface(x509.RevokedCertificate) +class _RevokedCertificate(object): + def __init__(self, backend, crl, x509_revoked): + self._backend = backend + # The X509_REVOKED_value is a X509_REVOKED * that has + # no reference counting. This means when X509_CRL_free is + # called then the CRL and all X509_REVOKED * are freed. Since + # you can retain a reference to a single revoked certificate + # and let the CRL fall out of scope we need to retain a + # private reference to the CRL inside the RevokedCertificate + # object to prevent the gc from being called inappropriately. 
+ self._crl = crl + self._x509_revoked = x509_revoked + + @property + def serial_number(self): + asn1_int = self._backend._lib.X509_REVOKED_get0_serialNumber( + self._x509_revoked + ) + self._backend.openssl_assert(asn1_int != self._backend._ffi.NULL) + return _asn1_integer_to_int(self._backend, asn1_int) + + @property + def revocation_date(self): + return _parse_asn1_time( + self._backend, + self._backend._lib.X509_REVOKED_get0_revocationDate( + self._x509_revoked + ) + ) + + @utils.cached_property + def extensions(self): + return _REVOKED_CERTIFICATE_EXTENSION_PARSER.parse( + self._backend, self._x509_revoked + ) + + +@utils.register_interface(x509.CertificateRevocationList) +class _CertificateRevocationList(object): + def __init__(self, backend, x509_crl): + self._backend = backend + self._x509_crl = x509_crl + + def __eq__(self, other): + if not isinstance(other, x509.CertificateRevocationList): + return NotImplemented + + res = self._backend._lib.X509_CRL_cmp(self._x509_crl, other._x509_crl) + return res == 0 + + def __ne__(self, other): + return not self == other + + def fingerprint(self, algorithm): + h = hashes.Hash(algorithm, self._backend) + bio = self._backend._create_mem_bio_gc() + res = self._backend._lib.i2d_X509_CRL_bio( + bio, self._x509_crl + ) + self._backend.openssl_assert(res == 1) + der = self._backend._read_mem_bio(bio) + h.update(der) + return h.finalize() + + def get_revoked_certificate_by_serial_number(self, serial_number): + revoked = self._backend._ffi.new("X509_REVOKED **") + asn1_int = _encode_asn1_int_gc(self._backend, serial_number) + res = self._backend._lib.X509_CRL_get0_by_serial( + self._x509_crl, revoked, asn1_int + ) + if res == 0: + return None + else: + self._backend.openssl_assert( + revoked[0] != self._backend._ffi.NULL + ) + return _RevokedCertificate( + self._backend, self._x509_crl, revoked[0] + ) + + @property + def signature_hash_algorithm(self): + oid = self.signature_algorithm_oid + try: + return x509._SIG_OIDS_TO_HASH[oid] + except KeyError: + raise UnsupportedAlgorithm( + "Signature algorithm OID:{0} not recognized".format(oid) + ) + + @property + def signature_algorithm_oid(self): + alg = self._backend._ffi.new("X509_ALGOR **") + self._backend._lib.X509_CRL_get0_signature( + self._x509_crl, self._backend._ffi.NULL, alg + ) + self._backend.openssl_assert(alg[0] != self._backend._ffi.NULL) + oid = _obj2txt(self._backend, alg[0].algorithm) + return x509.ObjectIdentifier(oid) + + @property + def issuer(self): + issuer = self._backend._lib.X509_CRL_get_issuer(self._x509_crl) + self._backend.openssl_assert(issuer != self._backend._ffi.NULL) + return _decode_x509_name(self._backend, issuer) + + @property + def next_update(self): + nu = self._backend._lib.X509_CRL_get_nextUpdate(self._x509_crl) + self._backend.openssl_assert(nu != self._backend._ffi.NULL) + return _parse_asn1_time(self._backend, nu) + + @property + def last_update(self): + lu = self._backend._lib.X509_CRL_get_lastUpdate(self._x509_crl) + self._backend.openssl_assert(lu != self._backend._ffi.NULL) + return _parse_asn1_time(self._backend, lu) + + @property + def signature(self): + sig = self._backend._ffi.new("ASN1_BIT_STRING **") + self._backend._lib.X509_CRL_get0_signature( + self._x509_crl, sig, self._backend._ffi.NULL + ) + self._backend.openssl_assert(sig[0] != self._backend._ffi.NULL) + return _asn1_string_to_bytes(self._backend, sig[0]) + + @property + def tbs_certlist_bytes(self): + pp = self._backend._ffi.new("unsigned char **") + res = 
self._backend._lib.i2d_re_X509_CRL_tbs(self._x509_crl, pp) + self._backend.openssl_assert(res > 0) + pp = self._backend._ffi.gc( + pp, lambda pointer: self._backend._lib.OPENSSL_free(pointer[0]) + ) + return self._backend._ffi.buffer(pp[0], res)[:] + + def public_bytes(self, encoding): + bio = self._backend._create_mem_bio_gc() + if encoding is serialization.Encoding.PEM: + res = self._backend._lib.PEM_write_bio_X509_CRL( + bio, self._x509_crl + ) + elif encoding is serialization.Encoding.DER: + res = self._backend._lib.i2d_X509_CRL_bio(bio, self._x509_crl) + else: + raise TypeError("encoding must be an item from the Encoding enum") + + self._backend.openssl_assert(res == 1) + return self._backend._read_mem_bio(bio) + + def _revoked_cert(self, idx): + revoked = self._backend._lib.X509_CRL_get_REVOKED(self._x509_crl) + r = self._backend._lib.sk_X509_REVOKED_value(revoked, idx) + self._backend.openssl_assert(r != self._backend._ffi.NULL) + return _RevokedCertificate(self._backend, self, r) + + def __iter__(self): + for i in range(len(self)): + yield self._revoked_cert(i) + + def __getitem__(self, idx): + if isinstance(idx, slice): + start, stop, step = idx.indices(len(self)) + return [self._revoked_cert(i) for i in range(start, stop, step)] + else: + idx = operator.index(idx) + if idx < 0: + idx += len(self) + if not 0 <= idx < len(self): + raise IndexError + return self._revoked_cert(idx) + + def __len__(self): + revoked = self._backend._lib.X509_CRL_get_REVOKED(self._x509_crl) + if revoked == self._backend._ffi.NULL: + return 0 + else: + return self._backend._lib.sk_X509_REVOKED_num(revoked) + + @utils.cached_property + def extensions(self): + return _CRL_EXTENSION_PARSER.parse(self._backend, self._x509_crl) + + def is_signature_valid(self, public_key): + if not isinstance(public_key, (dsa.DSAPublicKey, rsa.RSAPublicKey, + ec.EllipticCurvePublicKey)): + raise TypeError('Expecting one of DSAPublicKey, RSAPublicKey,' + ' or EllipticCurvePublicKey.') + res = self._backend._lib.X509_CRL_verify( + self._x509_crl, public_key._evp_pkey + ) + + if res != 1: + self._backend._consume_errors() + return False + + return True + + +@utils.register_interface(x509.CertificateSigningRequest) +class _CertificateSigningRequest(object): + def __init__(self, backend, x509_req): + self._backend = backend + self._x509_req = x509_req + + def __eq__(self, other): + if not isinstance(other, _CertificateSigningRequest): + return NotImplemented + + self_bytes = self.public_bytes(serialization.Encoding.DER) + other_bytes = other.public_bytes(serialization.Encoding.DER) + return self_bytes == other_bytes + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.public_bytes(serialization.Encoding.DER)) + + def public_key(self): + pkey = self._backend._lib.X509_REQ_get_pubkey(self._x509_req) + self._backend.openssl_assert(pkey != self._backend._ffi.NULL) + pkey = self._backend._ffi.gc(pkey, self._backend._lib.EVP_PKEY_free) + return self._backend._evp_pkey_to_public_key(pkey) + + @property + def subject(self): + subject = self._backend._lib.X509_REQ_get_subject_name(self._x509_req) + self._backend.openssl_assert(subject != self._backend._ffi.NULL) + return _decode_x509_name(self._backend, subject) + + @property + def signature_hash_algorithm(self): + oid = self.signature_algorithm_oid + try: + return x509._SIG_OIDS_TO_HASH[oid] + except KeyError: + raise UnsupportedAlgorithm( + "Signature algorithm OID:{0} not recognized".format(oid) + ) + + @property + def 
signature_algorithm_oid(self): + alg = self._backend._ffi.new("X509_ALGOR **") + self._backend._lib.X509_REQ_get0_signature( + self._x509_req, self._backend._ffi.NULL, alg + ) + self._backend.openssl_assert(alg[0] != self._backend._ffi.NULL) + oid = _obj2txt(self._backend, alg[0].algorithm) + return x509.ObjectIdentifier(oid) + + @utils.cached_property + def extensions(self): + x509_exts = self._backend._lib.X509_REQ_get_extensions(self._x509_req) + return _CSR_EXTENSION_PARSER.parse(self._backend, x509_exts) + + def public_bytes(self, encoding): + bio = self._backend._create_mem_bio_gc() + if encoding is serialization.Encoding.PEM: + res = self._backend._lib.PEM_write_bio_X509_REQ( + bio, self._x509_req + ) + elif encoding is serialization.Encoding.DER: + res = self._backend._lib.i2d_X509_REQ_bio(bio, self._x509_req) + else: + raise TypeError("encoding must be an item from the Encoding enum") + + self._backend.openssl_assert(res == 1) + return self._backend._read_mem_bio(bio) + + @property + def tbs_certrequest_bytes(self): + pp = self._backend._ffi.new("unsigned char **") + res = self._backend._lib.i2d_re_X509_REQ_tbs(self._x509_req, pp) + self._backend.openssl_assert(res > 0) + pp = self._backend._ffi.gc( + pp, lambda pointer: self._backend._lib.OPENSSL_free(pointer[0]) + ) + return self._backend._ffi.buffer(pp[0], res)[:] + + @property + def signature(self): + sig = self._backend._ffi.new("ASN1_BIT_STRING **") + self._backend._lib.X509_REQ_get0_signature( + self._x509_req, sig, self._backend._ffi.NULL + ) + self._backend.openssl_assert(sig[0] != self._backend._ffi.NULL) + return _asn1_string_to_bytes(self._backend, sig[0]) + + @property + def is_signature_valid(self): + pkey = self._backend._lib.X509_REQ_get_pubkey(self._x509_req) + self._backend.openssl_assert(pkey != self._backend._ffi.NULL) + pkey = self._backend._ffi.gc(pkey, self._backend._lib.EVP_PKEY_free) + res = self._backend._lib.X509_REQ_verify(self._x509_req, pkey) + + if res != 1: + self._backend._consume_errors() + return False + + return True + + +@utils.register_interface( + x509.certificate_transparency.SignedCertificateTimestamp +) +class _SignedCertificateTimestamp(object): + def __init__(self, backend, sct_list, sct): + self._backend = backend + # Keep the SCT_LIST that this SCT came from alive. + self._sct_list = sct_list + self._sct = sct + + @property + def version(self): + version = self._backend._lib.SCT_get_version(self._sct) + assert version == self._backend._lib.SCT_VERSION_V1 + return x509.certificate_transparency.Version.v1 + + @property + def log_id(self): + out = self._backend._ffi.new("unsigned char **") + log_id_length = self._backend._lib.SCT_get0_log_id(self._sct, out) + assert log_id_length >= 0 + return self._backend._ffi.buffer(out[0], log_id_length)[:] + + @property + def timestamp(self): + timestamp = self._backend._lib.SCT_get_timestamp(self._sct) + milliseconds = timestamp % 1000 + return datetime.datetime.utcfromtimestamp( + timestamp // 1000 + ).replace(microsecond=milliseconds * 1000) + + @property + def entry_type(self): + entry_type = self._backend._lib.SCT_get_log_entry_type(self._sct) + # We currently only support loading SCTs from the X.509 extension, so + # we only have precerts. 
+ assert entry_type == self._backend._lib.CT_LOG_ENTRY_TYPE_PRECERT + return x509.certificate_transparency.LogEntryType.PRE_CERTIFICATE diff --git a/server/www/packages/packages-common/cryptography/hazmat/bindings/__init__.py b/server/www/packages/packages-common/cryptography/hazmat/bindings/__init__.py new file mode 100644 index 0000000..4b54088 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/bindings/__init__.py @@ -0,0 +1,5 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function diff --git a/server/www/packages/packages-common/cryptography/hazmat/bindings/_constant_time.cp37-win32.pyd b/server/www/packages/packages-common/cryptography/hazmat/bindings/_constant_time.cp37-win32.pyd new file mode 100644 index 0000000..7e80078 Binary files /dev/null and b/server/www/packages/packages-common/cryptography/hazmat/bindings/_constant_time.cp37-win32.pyd differ diff --git a/server/www/packages/packages-common/cryptography/hazmat/bindings/_openssl.cp37-win32.pyd b/server/www/packages/packages-common/cryptography/hazmat/bindings/_openssl.cp37-win32.pyd new file mode 100644 index 0000000..06ec31e Binary files /dev/null and b/server/www/packages/packages-common/cryptography/hazmat/bindings/_openssl.cp37-win32.pyd differ diff --git a/server/www/packages/packages-common/cryptography/hazmat/bindings/_padding.cp37-win32.pyd b/server/www/packages/packages-common/cryptography/hazmat/bindings/_padding.cp37-win32.pyd new file mode 100644 index 0000000..72ae813 Binary files /dev/null and b/server/www/packages/packages-common/cryptography/hazmat/bindings/_padding.cp37-win32.pyd differ diff --git a/server/www/packages/packages-common/cryptography/hazmat/bindings/openssl/__init__.py b/server/www/packages/packages-common/cryptography/hazmat/bindings/openssl/__init__.py new file mode 100644 index 0000000..4b54088 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/bindings/openssl/__init__.py @@ -0,0 +1,5 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function diff --git a/server/www/packages/packages-common/cryptography/hazmat/bindings/openssl/_conditional.py b/server/www/packages/packages-common/cryptography/hazmat/bindings/openssl/_conditional.py new file mode 100644 index 0000000..b3e4e8b --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/bindings/openssl/_conditional.py @@ -0,0 +1,302 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
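
Editor's note: the x509 backend classes above are normally reached by loading a certificate through the top-level `x509` module. A brief sketch; the PEM path is illustrative and not part of this diff:

from cryptography import x509
from cryptography.hazmat.backends import default_backend

with open("cert.pem", "rb") as f:  # hypothetical path
    cert = x509.load_pem_x509_certificate(f.read(), default_backend())

# These properties map onto the _Certificate accessors defined above.
print(cert.serial_number)
print(cert.not_valid_before, cert.not_valid_after)
print(cert.signature_hash_algorithm.name)
for ext in cert.extensions:
    print(ext.oid, ext.critical)
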
+ +from __future__ import absolute_import, division, print_function + + +def cryptography_has_ec2m(): + return [ + "EC_POINT_set_affine_coordinates_GF2m", + "EC_POINT_get_affine_coordinates_GF2m", + "EC_POINT_set_compressed_coordinates_GF2m", + ] + + +def cryptography_has_ec_1_0_2(): + return [ + "EC_curve_nid2nist", + ] + + +def cryptography_has_set_ecdh_auto(): + return [ + "SSL_CTX_set_ecdh_auto", + ] + + +def cryptography_has_rsa_r_pkcs_decoding_error(): + return [ + "RSA_R_PKCS_DECODING_ERROR" + ] + + +def cryptography_has_rsa_oaep_md(): + return [ + "EVP_PKEY_CTX_set_rsa_oaep_md", + ] + + +def cryptography_has_rsa_oaep_label(): + return [ + "EVP_PKEY_CTX_set0_rsa_oaep_label", + ] + + +def cryptography_has_ssl3_method(): + return [ + "SSLv3_method", + "SSLv3_client_method", + "SSLv3_server_method", + ] + + +def cryptography_has_alpn(): + return [ + "SSL_CTX_set_alpn_protos", + "SSL_set_alpn_protos", + "SSL_CTX_set_alpn_select_cb", + "SSL_get0_alpn_selected", + ] + + +def cryptography_has_compression(): + return [ + "SSL_get_current_compression", + "SSL_get_current_expansion", + "SSL_COMP_get_name", + ] + + +def cryptography_has_get_server_tmp_key(): + return [ + "SSL_get_server_tmp_key", + ] + + +def cryptography_has_102_verification_error_codes(): + return [ + 'X509_V_ERR_SUITE_B_INVALID_VERSION', + 'X509_V_ERR_SUITE_B_INVALID_ALGORITHM', + 'X509_V_ERR_SUITE_B_INVALID_CURVE', + 'X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM', + 'X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED', + 'X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256', + 'X509_V_ERR_HOSTNAME_MISMATCH', + 'X509_V_ERR_EMAIL_MISMATCH', + 'X509_V_ERR_IP_ADDRESS_MISMATCH' + ] + + +def cryptography_has_102_verification_params(): + return [ + "X509_V_FLAG_SUITEB_128_LOS_ONLY", + "X509_V_FLAG_SUITEB_192_LOS", + "X509_V_FLAG_SUITEB_128_LOS", + "X509_VERIFY_PARAM_set1_host", + "X509_VERIFY_PARAM_set1_email", + "X509_VERIFY_PARAM_set1_ip", + "X509_VERIFY_PARAM_set1_ip_asc", + "X509_VERIFY_PARAM_set_hostflags", + ] + + +def cryptography_has_x509_v_flag_trusted_first(): + return [ + "X509_V_FLAG_TRUSTED_FIRST", + ] + + +def cryptography_has_x509_v_flag_partial_chain(): + return [ + "X509_V_FLAG_PARTIAL_CHAIN", + ] + + +def cryptography_has_set_cert_cb(): + return [ + "SSL_CTX_set_cert_cb", + "SSL_set_cert_cb", + ] + + +def cryptography_has_ssl_st(): + return [ + "SSL_ST_BEFORE", + "SSL_ST_OK", + "SSL_ST_INIT", + "SSL_ST_RENEGOTIATE", + ] + + +def cryptography_has_tls_st(): + return [ + "TLS_ST_BEFORE", + "TLS_ST_OK", + ] + + +def cryptography_has_locking_callbacks(): + return [ + "CRYPTO_LOCK", + "CRYPTO_UNLOCK", + "CRYPTO_READ", + "CRYPTO_LOCK_SSL", + "CRYPTO_lock", + ] + + +def cryptography_has_scrypt(): + return [ + "EVP_PBE_scrypt", + ] + + +def cryptography_has_generic_dtls_method(): + return [ + "DTLS_method", + "DTLS_server_method", + "DTLS_client_method", + "SSL_OP_NO_DTLSv1", + "SSL_OP_NO_DTLSv1_2", + "DTLS_set_link_mtu", + "DTLS_get_link_min_mtu", + ] + + +def cryptography_has_evp_pkey_dhx(): + return [ + "EVP_PKEY_DHX", + ] + + +def cryptography_has_mem_functions(): + return [ + "Cryptography_CRYPTO_set_mem_functions", + ] + + +def cryptography_has_sct(): + return [ + "SCT_get_version", + "SCT_get_log_entry_type", + "SCT_get0_log_id", + "SCT_get_timestamp", + "SCT_set_source", + "sk_SCT_num", + "sk_SCT_value", + "SCT_LIST_free", + ] + + +def cryptography_has_x509_store_ctx_get_issuer(): + return [ + "X509_STORE_get_get_issuer", + "X509_STORE_set_get_issuer", + ] + + +def cryptography_has_x25519(): + return [ + "EVP_PKEY_X25519", + 
"NID_X25519", + ] + + +def cryptography_has_evp_pkey_get_set_tls_encodedpoint(): + return [ + "EVP_PKEY_get1_tls_encodedpoint", + "EVP_PKEY_set1_tls_encodedpoint", + ] + + +def cryptography_has_fips(): + return [ + "FIPS_set_mode", + "FIPS_mode", + ] + + +def cryptography_has_ssl_sigalgs(): + return [ + "SSL_CTX_set1_sigalgs_list", + "SSL_get_sigalgs", + ] + + +def cryptography_has_psk(): + return [ + "SSL_CTX_use_psk_identity_hint", + "SSL_CTX_set_psk_server_callback", + "SSL_CTX_set_psk_client_callback", + ] + + +def cryptography_has_custom_ext(): + return [ + "SSL_CTX_add_client_custom_ext", + "SSL_CTX_add_server_custom_ext", + "SSL_extension_supported", + ] + + +def cryptography_has_openssl_cleanup(): + return [ + "OPENSSL_cleanup", + ] + + +# This is a mapping of +# {condition: function-returning-names-dependent-on-that-condition} so we can +# loop over them and delete unsupported names at runtime. It will be removed +# when cffi supports #if in cdef. We use functions instead of just a dict of +# lists so we can use coverage to measure which are used. +CONDITIONAL_NAMES = { + "Cryptography_HAS_EC2M": cryptography_has_ec2m, + "Cryptography_HAS_EC_1_0_2": cryptography_has_ec_1_0_2, + "Cryptography_HAS_SET_ECDH_AUTO": cryptography_has_set_ecdh_auto, + "Cryptography_HAS_RSA_R_PKCS_DECODING_ERROR": ( + cryptography_has_rsa_r_pkcs_decoding_error + ), + "Cryptography_HAS_RSA_OAEP_MD": cryptography_has_rsa_oaep_md, + "Cryptography_HAS_RSA_OAEP_LABEL": cryptography_has_rsa_oaep_label, + "Cryptography_HAS_SSL3_METHOD": cryptography_has_ssl3_method, + "Cryptography_HAS_ALPN": cryptography_has_alpn, + "Cryptography_HAS_COMPRESSION": cryptography_has_compression, + "Cryptography_HAS_GET_SERVER_TMP_KEY": cryptography_has_get_server_tmp_key, + "Cryptography_HAS_102_VERIFICATION_ERROR_CODES": ( + cryptography_has_102_verification_error_codes + ), + "Cryptography_HAS_102_VERIFICATION_PARAMS": ( + cryptography_has_102_verification_params + ), + "Cryptography_HAS_X509_V_FLAG_TRUSTED_FIRST": ( + cryptography_has_x509_v_flag_trusted_first + ), + "Cryptography_HAS_X509_V_FLAG_PARTIAL_CHAIN": ( + cryptography_has_x509_v_flag_partial_chain + ), + "Cryptography_HAS_SET_CERT_CB": cryptography_has_set_cert_cb, + "Cryptography_HAS_SSL_ST": cryptography_has_ssl_st, + "Cryptography_HAS_TLS_ST": cryptography_has_tls_st, + "Cryptography_HAS_LOCKING_CALLBACKS": cryptography_has_locking_callbacks, + "Cryptography_HAS_SCRYPT": cryptography_has_scrypt, + "Cryptography_HAS_GENERIC_DTLS_METHOD": ( + cryptography_has_generic_dtls_method + ), + "Cryptography_HAS_EVP_PKEY_DHX": cryptography_has_evp_pkey_dhx, + "Cryptography_HAS_MEM_FUNCTIONS": cryptography_has_mem_functions, + "Cryptography_HAS_SCT": cryptography_has_sct, + "Cryptography_HAS_X509_STORE_CTX_GET_ISSUER": ( + cryptography_has_x509_store_ctx_get_issuer + ), + "Cryptography_HAS_X25519": cryptography_has_x25519, + "Cryptography_HAS_EVP_PKEY_get_set_tls_encodedpoint": ( + cryptography_has_evp_pkey_get_set_tls_encodedpoint + ), + "Cryptography_HAS_FIPS": cryptography_has_fips, + "Cryptography_HAS_SIGALGS": cryptography_has_ssl_sigalgs, + "Cryptography_HAS_PSK": cryptography_has_psk, + "Cryptography_HAS_CUSTOM_EXT": cryptography_has_custom_ext, + "Cryptography_HAS_OPENSSL_CLEANUP": cryptography_has_openssl_cleanup, +} diff --git a/server/www/packages/packages-common/cryptography/hazmat/bindings/openssl/binding.py b/server/www/packages/packages-common/cryptography/hazmat/bindings/openssl/binding.py new file mode 100644 index 0000000..81cf547 --- /dev/null +++ 
b/server/www/packages/packages-common/cryptography/hazmat/bindings/openssl/binding.py @@ -0,0 +1,157 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import collections +import threading +import types + +from cryptography import utils +from cryptography.exceptions import InternalError +from cryptography.hazmat.bindings._openssl import ffi, lib +from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES + +_OpenSSLErrorWithText = collections.namedtuple( + "_OpenSSLErrorWithText", ["code", "lib", "func", "reason", "reason_text"] +) + + +class _OpenSSLError(object): + def __init__(self, code, lib, func, reason): + self._code = code + self._lib = lib + self._func = func + self._reason = reason + + def _lib_reason_match(self, lib, reason): + return lib == self.lib and reason == self.reason + + code = utils.read_only_property("_code") + lib = utils.read_only_property("_lib") + func = utils.read_only_property("_func") + reason = utils.read_only_property("_reason") + + +def _consume_errors(lib): + errors = [] + while True: + code = lib.ERR_get_error() + if code == 0: + break + + err_lib = lib.ERR_GET_LIB(code) + err_func = lib.ERR_GET_FUNC(code) + err_reason = lib.ERR_GET_REASON(code) + + errors.append(_OpenSSLError(code, err_lib, err_func, err_reason)) + + return errors + + +def _openssl_assert(lib, ok): + if not ok: + errors = _consume_errors(lib) + errors_with_text = [] + for err in errors: + buf = ffi.new("char[]", 256) + lib.ERR_error_string_n(err.code, buf, len(buf)) + err_text_reason = ffi.string(buf) + + errors_with_text.append( + _OpenSSLErrorWithText( + err.code, err.lib, err.func, err.reason, err_text_reason + ) + ) + + raise InternalError( + "Unknown OpenSSL error. This error is commonly encountered when " + "another library is not cleaning up the OpenSSL error stack. If " + "you are using cryptography with another library that uses " + "OpenSSL try disabling it before reporting a bug. Otherwise " + "please file an issue at https://github.com/pyca/cryptography/" + "issues with information on how to reproduce " + "this. ({0!r})".format(errors_with_text), + errors_with_text + ) + + +def build_conditional_library(lib, conditional_names): + conditional_lib = types.ModuleType("lib") + conditional_lib._original_lib = lib + excluded_names = set() + for condition, names_cb in conditional_names.items(): + if not getattr(lib, condition): + excluded_names.update(names_cb()) + + for attr in dir(lib): + if attr not in excluded_names: + setattr(conditional_lib, attr, getattr(lib, attr)) + + return conditional_lib + + +class Binding(object): + """ + OpenSSL API wrapper. + """ + lib = None + ffi = ffi + _lib_loaded = False + _init_lock = threading.Lock() + _lock_init_lock = threading.Lock() + + def __init__(self): + self._ensure_ffi_initialized() + + @classmethod + def _register_osrandom_engine(cls): + # Clear any errors extant in the queue before we start. In many + # scenarios other things may be interacting with OpenSSL in the same + # process space and it has proven untenable to assume that they will + # reliably clear the error queue. Once we clear it here we will + # error on any subsequent unexpected item in the stack. 
+ cls.lib.ERR_clear_error() + cls._osrandom_engine_id = cls.lib.Cryptography_osrandom_engine_id + cls._osrandom_engine_name = cls.lib.Cryptography_osrandom_engine_name + result = cls.lib.Cryptography_add_osrandom_engine() + _openssl_assert(cls.lib, result in (1, 2)) + + @classmethod + def _ensure_ffi_initialized(cls): + with cls._init_lock: + if not cls._lib_loaded: + cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES) + cls._lib_loaded = True + # initialize the SSL library + cls.lib.SSL_library_init() + # adds all ciphers/digests for EVP + cls.lib.OpenSSL_add_all_algorithms() + # loads error strings for libcrypto and libssl functions + cls.lib.SSL_load_error_strings() + cls._register_osrandom_engine() + + @classmethod + def init_static_locks(cls): + with cls._lock_init_lock: + cls._ensure_ffi_initialized() + # Use Python's implementation if available, importing _ssl triggers + # the setup for this. + __import__("_ssl") + + if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL: + return + + # If nothing else has setup a locking callback already, we set up + # our own + res = lib.Cryptography_setup_ssl_threads() + _openssl_assert(cls.lib, res == 1) + + +# OpenSSL is not thread safe until the locks are initialized. We call this +# method in module scope so that it executes with the import lock. On +# Pythons < 3.4 this import lock is a global lock, which can prevent a race +# condition registering the OpenSSL locks. On Python 3.4+ the import lock +# is per module so this approach will not work. +Binding.init_static_locks() diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/__init__.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/__init__.py new file mode 100644 index 0000000..4b54088 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/__init__.py @@ -0,0 +1,5 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/__init__.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/__init__.py new file mode 100644 index 0000000..494a7a1 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/__init__.py @@ -0,0 +1,40 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class AsymmetricSignatureContext(object): + @abc.abstractmethod + def update(self, data): + """ + Processes the provided bytes and returns nothing. + """ + + @abc.abstractmethod + def finalize(self): + """ + Returns the signature as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class AsymmetricVerificationContext(object): + @abc.abstractmethod + def update(self, data): + """ + Processes the provided bytes and returns nothing. + """ + + @abc.abstractmethod + def verify(self): + """ + Raises an exception if the bytes provided to update do not match the + signature or the signature does not match the public key. 
+ """ diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/dh.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/dh.py new file mode 100644 index 0000000..4fc9952 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/dh.py @@ -0,0 +1,212 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils + + +def generate_parameters(generator, key_size, backend): + return backend.generate_dh_parameters(generator, key_size) + + +class DHPrivateNumbers(object): + def __init__(self, x, public_numbers): + if not isinstance(x, six.integer_types): + raise TypeError("x must be an integer.") + + if not isinstance(public_numbers, DHPublicNumbers): + raise TypeError("public_numbers must be an instance of " + "DHPublicNumbers.") + + self._x = x + self._public_numbers = public_numbers + + def __eq__(self, other): + if not isinstance(other, DHPrivateNumbers): + return NotImplemented + + return ( + self._x == other._x and + self._public_numbers == other._public_numbers + ) + + def __ne__(self, other): + return not self == other + + def private_key(self, backend): + return backend.load_dh_private_numbers(self) + + public_numbers = utils.read_only_property("_public_numbers") + x = utils.read_only_property("_x") + + +class DHPublicNumbers(object): + def __init__(self, y, parameter_numbers): + if not isinstance(y, six.integer_types): + raise TypeError("y must be an integer.") + + if not isinstance(parameter_numbers, DHParameterNumbers): + raise TypeError( + "parameters must be an instance of DHParameterNumbers.") + + self._y = y + self._parameter_numbers = parameter_numbers + + def __eq__(self, other): + if not isinstance(other, DHPublicNumbers): + return NotImplemented + + return ( + self._y == other._y and + self._parameter_numbers == other._parameter_numbers + ) + + def __ne__(self, other): + return not self == other + + def public_key(self, backend): + return backend.load_dh_public_numbers(self) + + y = utils.read_only_property("_y") + parameter_numbers = utils.read_only_property("_parameter_numbers") + + +class DHParameterNumbers(object): + def __init__(self, p, g, q=None): + if ( + not isinstance(p, six.integer_types) or + not isinstance(g, six.integer_types) + ): + raise TypeError("p and g must be integers") + if q is not None and not isinstance(q, six.integer_types): + raise TypeError("q must be integer or None") + + if g < 2: + raise ValueError("DH generator must be 2 or greater") + + self._p = p + self._g = g + self._q = q + + def __eq__(self, other): + if not isinstance(other, DHParameterNumbers): + return NotImplemented + + return ( + self._p == other._p and + self._g == other._g and + self._q == other._q + ) + + def __ne__(self, other): + return not self == other + + def parameters(self, backend): + return backend.load_dh_parameter_numbers(self) + + p = utils.read_only_property("_p") + g = utils.read_only_property("_g") + q = utils.read_only_property("_q") + + +@six.add_metaclass(abc.ABCMeta) +class DHParameters(object): + @abc.abstractmethod + def generate_private_key(self): + """ + Generates and returns a DHPrivateKey. 
+ """ + + @abc.abstractmethod + def parameter_bytes(self, encoding, format): + """ + Returns the parameters serialized as bytes. + """ + + @abc.abstractmethod + def parameter_numbers(self): + """ + Returns a DHParameterNumbers. + """ + + +DHParametersWithSerialization = DHParameters + + +@six.add_metaclass(abc.ABCMeta) +class DHPrivateKey(object): + @abc.abstractproperty + def key_size(self): + """ + The bit length of the prime modulus. + """ + + @abc.abstractmethod + def public_key(self): + """ + The DHPublicKey associated with this private key. + """ + + @abc.abstractmethod + def parameters(self): + """ + The DHParameters object associated with this private key. + """ + + @abc.abstractmethod + def exchange(self, peer_public_key): + """ + Given peer's DHPublicKey, carry out the key exchange and + return shared key as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DHPrivateKeyWithSerialization(DHPrivateKey): + @abc.abstractmethod + def private_numbers(self): + """ + Returns a DHPrivateNumbers. + """ + + @abc.abstractmethod + def private_bytes(self, encoding, format, encryption_algorithm): + """ + Returns the key serialized as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DHPublicKey(object): + @abc.abstractproperty + def key_size(self): + """ + The bit length of the prime modulus. + """ + + @abc.abstractmethod + def parameters(self): + """ + The DHParameters object associated with this public key. + """ + + @abc.abstractmethod + def public_numbers(self): + """ + Returns a DHPublicNumbers. + """ + + @abc.abstractmethod + def public_bytes(self, encoding, format): + """ + Returns the key serialized as bytes. + """ + + +DHPublicKeyWithSerialization = DHPublicKey diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/dsa.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/dsa.py new file mode 100644 index 0000000..e380a44 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/dsa.py @@ -0,0 +1,254 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils + + +@six.add_metaclass(abc.ABCMeta) +class DSAParameters(object): + @abc.abstractmethod + def generate_private_key(self): + """ + Generates and returns a DSAPrivateKey. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DSAParametersWithNumbers(DSAParameters): + @abc.abstractmethod + def parameter_numbers(self): + """ + Returns a DSAParameterNumbers. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DSAPrivateKey(object): + @abc.abstractproperty + def key_size(self): + """ + The bit length of the prime modulus. + """ + + @abc.abstractmethod + def public_key(self): + """ + The DSAPublicKey associated with this private key. + """ + + @abc.abstractmethod + def parameters(self): + """ + The DSAParameters object associated with this private key. + """ + + @abc.abstractmethod + def signer(self, signature_algorithm): + """ + Returns an AsymmetricSignatureContext used for signing data. + """ + + @abc.abstractmethod + def sign(self, data, algorithm): + """ + Signs the data + """ + + +@six.add_metaclass(abc.ABCMeta) +class DSAPrivateKeyWithSerialization(DSAPrivateKey): + @abc.abstractmethod + def private_numbers(self): + """ + Returns a DSAPrivateNumbers. 
+ """ + + @abc.abstractmethod + def private_bytes(self, encoding, format, encryption_algorithm): + """ + Returns the key serialized as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DSAPublicKey(object): + @abc.abstractproperty + def key_size(self): + """ + The bit length of the prime modulus. + """ + + @abc.abstractmethod + def parameters(self): + """ + The DSAParameters object associated with this public key. + """ + + @abc.abstractmethod + def verifier(self, signature, signature_algorithm): + """ + Returns an AsymmetricVerificationContext used for signing data. + """ + + @abc.abstractmethod + def public_numbers(self): + """ + Returns a DSAPublicNumbers. + """ + + @abc.abstractmethod + def public_bytes(self, encoding, format): + """ + Returns the key serialized as bytes. + """ + + @abc.abstractmethod + def verify(self, signature, data, algorithm): + """ + Verifies the signature of the data. + """ + + +DSAPublicKeyWithSerialization = DSAPublicKey + + +def generate_parameters(key_size, backend): + return backend.generate_dsa_parameters(key_size) + + +def generate_private_key(key_size, backend): + return backend.generate_dsa_private_key_and_parameters(key_size) + + +def _check_dsa_parameters(parameters): + if parameters.p.bit_length() not in [1024, 2048, 3072]: + raise ValueError("p must be exactly 1024, 2048, or 3072 bits long") + if parameters.q.bit_length() not in [160, 224, 256]: + raise ValueError("q must be exactly 160, 224, or 256 bits long") + + if not (1 < parameters.g < parameters.p): + raise ValueError("g, p don't satisfy 1 < g < p.") + + +def _check_dsa_private_numbers(numbers): + parameters = numbers.public_numbers.parameter_numbers + _check_dsa_parameters(parameters) + if numbers.x <= 0 or numbers.x >= parameters.q: + raise ValueError("x must be > 0 and < q.") + + if numbers.public_numbers.y != pow(parameters.g, numbers.x, parameters.p): + raise ValueError("y must be equal to (g ** x % p).") + + +class DSAParameterNumbers(object): + def __init__(self, p, q, g): + if ( + not isinstance(p, six.integer_types) or + not isinstance(q, six.integer_types) or + not isinstance(g, six.integer_types) + ): + raise TypeError( + "DSAParameterNumbers p, q, and g arguments must be integers." + ) + + self._p = p + self._q = q + self._g = g + + p = utils.read_only_property("_p") + q = utils.read_only_property("_q") + g = utils.read_only_property("_g") + + def parameters(self, backend): + return backend.load_dsa_parameter_numbers(self) + + def __eq__(self, other): + if not isinstance(other, DSAParameterNumbers): + return NotImplemented + + return self.p == other.p and self.q == other.q and self.g == other.g + + def __ne__(self, other): + return not self == other + + def __repr__(self): + return ( + "".format( + self=self + ) + ) + + +class DSAPublicNumbers(object): + def __init__(self, y, parameter_numbers): + if not isinstance(y, six.integer_types): + raise TypeError("DSAPublicNumbers y argument must be an integer.") + + if not isinstance(parameter_numbers, DSAParameterNumbers): + raise TypeError( + "parameter_numbers must be a DSAParameterNumbers instance." 
+ ) + + self._y = y + self._parameter_numbers = parameter_numbers + + y = utils.read_only_property("_y") + parameter_numbers = utils.read_only_property("_parameter_numbers") + + def public_key(self, backend): + return backend.load_dsa_public_numbers(self) + + def __eq__(self, other): + if not isinstance(other, DSAPublicNumbers): + return NotImplemented + + return ( + self.y == other.y and + self.parameter_numbers == other.parameter_numbers + ) + + def __ne__(self, other): + return not self == other + + def __repr__(self): + return ( + "<DSAPublicNumbers(y={self.y}, " + "parameter_numbers={self.parameter_numbers})>".format(self=self) + ) + + +class DSAPrivateNumbers(object): + def __init__(self, x, public_numbers): + if not isinstance(x, six.integer_types): + raise TypeError("DSAPrivateNumbers x argument must be an integer.") + + if not isinstance(public_numbers, DSAPublicNumbers): + raise TypeError( + "public_numbers must be a DSAPublicNumbers instance." + ) + self._public_numbers = public_numbers + self._x = x + + x = utils.read_only_property("_x") + public_numbers = utils.read_only_property("_public_numbers") + + def private_key(self, backend): + return backend.load_dsa_private_numbers(self) + + def __eq__(self, other): + if not isinstance(other, DSAPrivateNumbers): + return NotImplemented + + return ( + self.x == other.x and self.public_numbers == other.public_numbers + ) + + def __ne__(self, other): + return not self == other diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/ec.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/ec.py new file mode 100644 index 0000000..6cbfcab --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/ec.py @@ -0,0 +1,411 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils + + +@six.add_metaclass(abc.ABCMeta) +class EllipticCurve(object): + @abc.abstractproperty + def name(self): + """ + The name of the curve. e.g. secp256r1. + """ + + @abc.abstractproperty + def key_size(self): + """ + Bit size of a secret scalar for the curve. + """ + + +@six.add_metaclass(abc.ABCMeta) +class EllipticCurveSignatureAlgorithm(object): + @abc.abstractproperty + def algorithm(self): + """ + The digest algorithm used with this signature. + """ + + +@six.add_metaclass(abc.ABCMeta) +class EllipticCurvePrivateKey(object): + @abc.abstractmethod + def signer(self, signature_algorithm): + """ + Returns an AsymmetricSignatureContext used for signing data. + """ + + @abc.abstractmethod + def exchange(self, algorithm, peer_public_key): + """ + Performs a key exchange operation using the provided algorithm with the + provided peer's public key. + """ + + @abc.abstractmethod + def public_key(self): + """ + The EllipticCurvePublicKey for this private key. + """ + + @abc.abstractproperty + def curve(self): + """ + The EllipticCurve that this key is on. + """ + + @abc.abstractproperty + def key_size(self): + """ + Bit size of a secret scalar for the curve. + """ + + @abc.abstractmethod + def sign(self, data, signature_algorithm): + """ + Signs the data + """ + + +@six.add_metaclass(abc.ABCMeta) +class EllipticCurvePrivateKeyWithSerialization(EllipticCurvePrivateKey): + @abc.abstractmethod + def private_numbers(self): + """ + Returns an EllipticCurvePrivateNumbers.
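A minimal sign/verify sketch for the dsa module above, assuming the default OpenSSL backend; verify() raises InvalidSignature on mismatch:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import dsa

private_key = dsa.generate_private_key(key_size=2048, backend=default_backend())
signature = private_key.sign(b"message", hashes.SHA256())
private_key.public_key().verify(signature, b"message", hashes.SHA256())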
+ """ + + @abc.abstractmethod + def private_bytes(self, encoding, format, encryption_algorithm): + """ + Returns the key serialized as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class EllipticCurvePublicKey(object): + @abc.abstractmethod + def verifier(self, signature, signature_algorithm): + """ + Returns an AsymmetricVerificationContext used for signing data. + """ + + @abc.abstractproperty + def curve(self): + """ + The EllipticCurve that this key is on. + """ + + @abc.abstractproperty + def key_size(self): + """ + Bit size of a secret scalar for the curve. + """ + + @abc.abstractmethod + def public_numbers(self): + """ + Returns an EllipticCurvePublicNumbers. + """ + + @abc.abstractmethod + def public_bytes(self, encoding, format): + """ + Returns the key serialized as bytes. + """ + + @abc.abstractmethod + def verify(self, signature, data, signature_algorithm): + """ + Verifies the signature of the data. + """ + + +EllipticCurvePublicKeyWithSerialization = EllipticCurvePublicKey + + +@utils.register_interface(EllipticCurve) +class SECT571R1(object): + name = "sect571r1" + key_size = 570 + + +@utils.register_interface(EllipticCurve) +class SECT409R1(object): + name = "sect409r1" + key_size = 409 + + +@utils.register_interface(EllipticCurve) +class SECT283R1(object): + name = "sect283r1" + key_size = 283 + + +@utils.register_interface(EllipticCurve) +class SECT233R1(object): + name = "sect233r1" + key_size = 233 + + +@utils.register_interface(EllipticCurve) +class SECT163R2(object): + name = "sect163r2" + key_size = 163 + + +@utils.register_interface(EllipticCurve) +class SECT571K1(object): + name = "sect571k1" + key_size = 571 + + +@utils.register_interface(EllipticCurve) +class SECT409K1(object): + name = "sect409k1" + key_size = 409 + + +@utils.register_interface(EllipticCurve) +class SECT283K1(object): + name = "sect283k1" + key_size = 283 + + +@utils.register_interface(EllipticCurve) +class SECT233K1(object): + name = "sect233k1" + key_size = 233 + + +@utils.register_interface(EllipticCurve) +class SECT163K1(object): + name = "sect163k1" + key_size = 163 + + +@utils.register_interface(EllipticCurve) +class SECP521R1(object): + name = "secp521r1" + key_size = 521 + + +@utils.register_interface(EllipticCurve) +class SECP384R1(object): + name = "secp384r1" + key_size = 384 + + +@utils.register_interface(EllipticCurve) +class SECP256R1(object): + name = "secp256r1" + key_size = 256 + + +@utils.register_interface(EllipticCurve) +class SECP256K1(object): + name = "secp256k1" + key_size = 256 + + +@utils.register_interface(EllipticCurve) +class SECP224R1(object): + name = "secp224r1" + key_size = 224 + + +@utils.register_interface(EllipticCurve) +class SECP192R1(object): + name = "secp192r1" + key_size = 192 + + +@utils.register_interface(EllipticCurve) +class BrainpoolP256R1(object): + name = "brainpoolP256r1" + key_size = 256 + + +@utils.register_interface(EllipticCurve) +class BrainpoolP384R1(object): + name = "brainpoolP384r1" + key_size = 384 + + +@utils.register_interface(EllipticCurve) +class BrainpoolP512R1(object): + name = "brainpoolP512r1" + key_size = 512 + + +_CURVE_TYPES = { + "prime192v1": SECP192R1, + "prime256v1": SECP256R1, + + "secp192r1": SECP192R1, + "secp224r1": SECP224R1, + "secp256r1": SECP256R1, + "secp384r1": SECP384R1, + "secp521r1": SECP521R1, + "secp256k1": SECP256K1, + + "sect163k1": SECT163K1, + "sect233k1": SECT233K1, + "sect283k1": SECT283K1, + "sect409k1": SECT409K1, + "sect571k1": SECT571K1, + + "sect163r2": SECT163R2, + "sect233r1": SECT233R1, + 
"sect283r1": SECT283R1, + "sect409r1": SECT409R1, + "sect571r1": SECT571R1, + + "brainpoolP256r1": BrainpoolP256R1, + "brainpoolP384r1": BrainpoolP384R1, + "brainpoolP512r1": BrainpoolP512R1, +} + + +@utils.register_interface(EllipticCurveSignatureAlgorithm) +class ECDSA(object): + def __init__(self, algorithm): + self._algorithm = algorithm + + algorithm = utils.read_only_property("_algorithm") + + +def generate_private_key(curve, backend): + return backend.generate_elliptic_curve_private_key(curve) + + +def derive_private_key(private_value, curve, backend): + if not isinstance(private_value, six.integer_types): + raise TypeError("private_value must be an integer type.") + + if private_value <= 0: + raise ValueError("private_value must be a positive integer.") + + if not isinstance(curve, EllipticCurve): + raise TypeError("curve must provide the EllipticCurve interface.") + + return backend.derive_elliptic_curve_private_key(private_value, curve) + + +class EllipticCurvePublicNumbers(object): + def __init__(self, x, y, curve): + if ( + not isinstance(x, six.integer_types) or + not isinstance(y, six.integer_types) + ): + raise TypeError("x and y must be integers.") + + if not isinstance(curve, EllipticCurve): + raise TypeError("curve must provide the EllipticCurve interface.") + + self._y = y + self._x = x + self._curve = curve + + def public_key(self, backend): + return backend.load_elliptic_curve_public_numbers(self) + + def encode_point(self): + # key_size is in bits. Convert to bytes and round up + byte_length = (self.curve.key_size + 7) // 8 + return ( + b'\x04' + utils.int_to_bytes(self.x, byte_length) + + utils.int_to_bytes(self.y, byte_length) + ) + + @classmethod + def from_encoded_point(cls, curve, data): + if not isinstance(curve, EllipticCurve): + raise TypeError("curve must be an EllipticCurve instance") + + if data.startswith(b'\x04'): + # key_size is in bits. Convert to bytes and round up + byte_length = (curve.key_size + 7) // 8 + if len(data) == 2 * byte_length + 1: + x = utils.int_from_bytes(data[1:byte_length + 1], 'big') + y = utils.int_from_bytes(data[byte_length + 1:], 'big') + return cls(x, y, curve) + else: + raise ValueError('Invalid elliptic curve point data length') + else: + raise ValueError('Unsupported elliptic curve point type') + + curve = utils.read_only_property("_curve") + x = utils.read_only_property("_x") + y = utils.read_only_property("_y") + + def __eq__(self, other): + if not isinstance(other, EllipticCurvePublicNumbers): + return NotImplemented + + return ( + self.x == other.x and + self.y == other.y and + self.curve.name == other.curve.name and + self.curve.key_size == other.curve.key_size + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.x, self.y, self.curve.name, self.curve.key_size)) + + def __repr__(self): + return ( + "".format(self) + ) + + +class EllipticCurvePrivateNumbers(object): + def __init__(self, private_value, public_numbers): + if not isinstance(private_value, six.integer_types): + raise TypeError("private_value must be an integer.") + + if not isinstance(public_numbers, EllipticCurvePublicNumbers): + raise TypeError( + "public_numbers must be an EllipticCurvePublicNumbers " + "instance." 
+ ) + + self._private_value = private_value + self._public_numbers = public_numbers + + def private_key(self, backend): + return backend.load_elliptic_curve_private_numbers(self) + + private_value = utils.read_only_property("_private_value") + public_numbers = utils.read_only_property("_public_numbers") + + def __eq__(self, other): + if not isinstance(other, EllipticCurvePrivateNumbers): + return NotImplemented + + return ( + self.private_value == other.private_value and + self.public_numbers == other.public_numbers + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.private_value, self.public_numbers)) + + +class ECDH(object): + pass diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/padding.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/padding.py new file mode 100644 index 0000000..a37c3f9 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/padding.py @@ -0,0 +1,79 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +import math + +import six + +from cryptography import utils +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.asymmetric import rsa + + +@six.add_metaclass(abc.ABCMeta) +class AsymmetricPadding(object): + @abc.abstractproperty + def name(self): + """ + A string naming this padding (e.g. "PSS", "PKCS1"). + """ + + +@utils.register_interface(AsymmetricPadding) +class PKCS1v15(object): + name = "EMSA-PKCS1-v1_5" + + +@utils.register_interface(AsymmetricPadding) +class PSS(object): + MAX_LENGTH = object() + name = "EMSA-PSS" + + def __init__(self, mgf, salt_length): + self._mgf = mgf + + if (not isinstance(salt_length, six.integer_types) and + salt_length is not self.MAX_LENGTH): + raise TypeError("salt_length must be an integer.") + + if salt_length is not self.MAX_LENGTH and salt_length < 0: + raise ValueError("salt_length must be zero or greater.") + + self._salt_length = salt_length + + +@utils.register_interface(AsymmetricPadding) +class OAEP(object): + name = "EME-OAEP" + + def __init__(self, mgf, algorithm, label): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + + self._mgf = mgf + self._algorithm = algorithm + self._label = label + + +class MGF1(object): + MAX_LENGTH = object() + + def __init__(self, algorithm): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + + self._algorithm = algorithm + + +def calculate_max_pss_salt_length(key, hash_algorithm): + if not isinstance(key, (rsa.RSAPrivateKey, rsa.RSAPublicKey)): + raise TypeError("key must be an RSA public or private key") + # bit length - 1 per RFC 3447 + emlen = int(math.ceil((key.key_size - 1) / 8.0)) + salt_length = emlen - hash_algorithm.digest_size - 2 + assert salt_length >= 0 + return salt_length diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/rsa.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/rsa.py new file mode 100644 index 0000000..27db671 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/rsa.py @@ -0,0 +1,368 @@ +# This 
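A minimal sketch for the ec module above (ECDSA signing plus an ECDH exchange), assuming the default OpenSSL backend; the padding classes above are exercised in the RSA sketch further down:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec

private_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
signature = private_key.sign(b"data", ec.ECDSA(hashes.SHA256()))
private_key.public_key().verify(signature, b"data", ec.ECDSA(hashes.SHA256()))

peer_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
shared_secret = private_key.exchange(ec.ECDH(), peer_key.public_key())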
file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +try: + # Only available in math in 3.5+ + from math import gcd +except ImportError: + from fractions import gcd + +import six + +from cryptography import utils +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.backends.interfaces import RSABackend + + +@six.add_metaclass(abc.ABCMeta) +class RSAPrivateKey(object): + @abc.abstractmethod + def signer(self, padding, algorithm): + """ + Returns an AsymmetricSignatureContext used for signing data. + """ + + @abc.abstractmethod + def decrypt(self, ciphertext, padding): + """ + Decrypts the provided ciphertext. + """ + + @abc.abstractproperty + def key_size(self): + """ + The bit length of the public modulus. + """ + + @abc.abstractmethod + def public_key(self): + """ + The RSAPublicKey associated with this private key. + """ + + @abc.abstractmethod + def sign(self, data, padding, algorithm): + """ + Signs the data. + """ + + +@six.add_metaclass(abc.ABCMeta) +class RSAPrivateKeyWithSerialization(RSAPrivateKey): + @abc.abstractmethod + def private_numbers(self): + """ + Returns an RSAPrivateNumbers. + """ + + @abc.abstractmethod + def private_bytes(self, encoding, format, encryption_algorithm): + """ + Returns the key serialized as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class RSAPublicKey(object): + @abc.abstractmethod + def verifier(self, signature, padding, algorithm): + """ + Returns an AsymmetricVerificationContext used for verifying signatures. + """ + + @abc.abstractmethod + def encrypt(self, plaintext, padding): + """ + Encrypts the given plaintext. + """ + + @abc.abstractproperty + def key_size(self): + """ + The bit length of the public modulus. + """ + + @abc.abstractmethod + def public_numbers(self): + """ + Returns an RSAPublicNumbers + """ + + @abc.abstractmethod + def public_bytes(self, encoding, format): + """ + Returns the key serialized as bytes. + """ + + @abc.abstractmethod + def verify(self, signature, data, padding, algorithm): + """ + Verifies the signature of the data. 
+ """ + + +RSAPublicKeyWithSerialization = RSAPublicKey + + +def generate_private_key(public_exponent, key_size, backend): + if not isinstance(backend, RSABackend): + raise UnsupportedAlgorithm( + "Backend object does not implement RSABackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + _verify_rsa_parameters(public_exponent, key_size) + return backend.generate_rsa_private_key(public_exponent, key_size) + + +def _verify_rsa_parameters(public_exponent, key_size): + if public_exponent < 3: + raise ValueError("public_exponent must be >= 3.") + + if public_exponent & 1 == 0: + raise ValueError("public_exponent must be odd.") + + if key_size < 512: + raise ValueError("key_size must be at least 512-bits.") + + +def _check_private_key_components(p, q, private_exponent, dmp1, dmq1, iqmp, + public_exponent, modulus): + if modulus < 3: + raise ValueError("modulus must be >= 3.") + + if p >= modulus: + raise ValueError("p must be < modulus.") + + if q >= modulus: + raise ValueError("q must be < modulus.") + + if dmp1 >= modulus: + raise ValueError("dmp1 must be < modulus.") + + if dmq1 >= modulus: + raise ValueError("dmq1 must be < modulus.") + + if iqmp >= modulus: + raise ValueError("iqmp must be < modulus.") + + if private_exponent >= modulus: + raise ValueError("private_exponent must be < modulus.") + + if public_exponent < 3 or public_exponent >= modulus: + raise ValueError("public_exponent must be >= 3 and < modulus.") + + if public_exponent & 1 == 0: + raise ValueError("public_exponent must be odd.") + + if dmp1 & 1 == 0: + raise ValueError("dmp1 must be odd.") + + if dmq1 & 1 == 0: + raise ValueError("dmq1 must be odd.") + + if p * q != modulus: + raise ValueError("p*q must equal modulus.") + + +def _check_public_key_components(e, n): + if n < 3: + raise ValueError("n must be >= 3.") + + if e < 3 or e >= n: + raise ValueError("e must be >= 3 and < n.") + + if e & 1 == 0: + raise ValueError("e must be odd.") + + +def _modinv(e, m): + """ + Modular Multiplicative Inverse. Returns x such that: (x*e) mod m == 1 + """ + x1, y1, x2, y2 = 1, 0, 0, 1 + a, b = e, m + while b > 0: + q, r = divmod(a, b) + xn, yn = x1 - q * x2, y1 - q * y2 + a, b, x1, y1, x2, y2 = b, r, x2, y2, xn, yn + return x1 % m + + +def rsa_crt_iqmp(p, q): + """ + Compute the CRT (q ** -1) % p value from RSA primes p and q. + """ + return _modinv(q, p) + + +def rsa_crt_dmp1(private_exponent, p): + """ + Compute the CRT private_exponent % (p - 1) value from the RSA + private_exponent (d) and p. + """ + return private_exponent % (p - 1) + + +def rsa_crt_dmq1(private_exponent, q): + """ + Compute the CRT private_exponent % (q - 1) value from the RSA + private_exponent (d) and q. + """ + return private_exponent % (q - 1) + + +# Controls the number of iterations rsa_recover_prime_factors will perform +# to obtain the prime factors. Each iteration increments by 2 so the actual +# maximum attempts is half this number. +_MAX_RECOVERY_ATTEMPTS = 1000 + + +def rsa_recover_prime_factors(n, e, d): + """ + Compute factors p and q from the private exponent d. We assume that n has + no more than two factors. This function is adapted from code in PyCrypto. + """ + # See 8.2.2(i) in Handbook of Applied Cryptography. + ktot = d * e - 1 + # The quantity d*e-1 is a multiple of phi(n), even, + # and can be represented as t*2^s. + t = ktot + while t % 2 == 0: + t = t // 2 + # Cycle through all multiplicative inverses in Zn. + # The algorithm is non-deterministic, but there is a 50% chance + # any candidate a leads to successful factoring. 
+ # See "Digitalized Signatures and Public Key Functions as Intractable + # as Factorization", M. Rabin, 1979 + spotted = False + a = 2 + while not spotted and a < _MAX_RECOVERY_ATTEMPTS: + k = t + # Cycle through all values a^{t*2^i}=a^k + while k < ktot: + cand = pow(a, k, n) + # Check if a^k is a non-trivial root of unity (mod n) + if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1: + # We have found a number such that (cand-1)(cand+1)=0 (mod n). + # Either of the terms divides n. + p = gcd(cand + 1, n) + spotted = True + break + k *= 2 + # This value was not any good... let's try another! + a += 2 + if not spotted: + raise ValueError("Unable to compute factors p and q from exponent d.") + # Found ! + q, r = divmod(n, p) + assert r == 0 + p, q = sorted((p, q), reverse=True) + return (p, q) + + +class RSAPrivateNumbers(object): + def __init__(self, p, q, d, dmp1, dmq1, iqmp, + public_numbers): + if ( + not isinstance(p, six.integer_types) or + not isinstance(q, six.integer_types) or + not isinstance(d, six.integer_types) or + not isinstance(dmp1, six.integer_types) or + not isinstance(dmq1, six.integer_types) or + not isinstance(iqmp, six.integer_types) + ): + raise TypeError( + "RSAPrivateNumbers p, q, d, dmp1, dmq1, iqmp arguments must" + " all be an integers." + ) + + if not isinstance(public_numbers, RSAPublicNumbers): + raise TypeError( + "RSAPrivateNumbers public_numbers must be an RSAPublicNumbers" + " instance." + ) + + self._p = p + self._q = q + self._d = d + self._dmp1 = dmp1 + self._dmq1 = dmq1 + self._iqmp = iqmp + self._public_numbers = public_numbers + + p = utils.read_only_property("_p") + q = utils.read_only_property("_q") + d = utils.read_only_property("_d") + dmp1 = utils.read_only_property("_dmp1") + dmq1 = utils.read_only_property("_dmq1") + iqmp = utils.read_only_property("_iqmp") + public_numbers = utils.read_only_property("_public_numbers") + + def private_key(self, backend): + return backend.load_rsa_private_numbers(self) + + def __eq__(self, other): + if not isinstance(other, RSAPrivateNumbers): + return NotImplemented + + return ( + self.p == other.p and + self.q == other.q and + self.d == other.d and + self.dmp1 == other.dmp1 and + self.dmq1 == other.dmq1 and + self.iqmp == other.iqmp and + self.public_numbers == other.public_numbers + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(( + self.p, + self.q, + self.d, + self.dmp1, + self.dmq1, + self.iqmp, + self.public_numbers, + )) + + +class RSAPublicNumbers(object): + def __init__(self, e, n): + if ( + not isinstance(e, six.integer_types) or + not isinstance(n, six.integer_types) + ): + raise TypeError("RSAPublicNumbers arguments must be integers.") + + self._e = e + self._n = n + + e = utils.read_only_property("_e") + n = utils.read_only_property("_n") + + def public_key(self, backend): + return backend.load_rsa_public_numbers(self) + + def __repr__(self): + return "".format(self) + + def __eq__(self, other): + if not isinstance(other, RSAPublicNumbers): + return NotImplemented + + return self.e == other.e and self.n == other.n + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.e, self.n)) diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/utils.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/utils.py new file mode 100644 index 0000000..ef1e7eb --- /dev/null +++ 
b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/utils.py @@ -0,0 +1,60 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import warnings + +from asn1crypto.algos import DSASignature + +import six + +from cryptography import utils +from cryptography.hazmat.primitives import hashes + + +def decode_rfc6979_signature(signature): + warnings.warn( + "decode_rfc6979_signature is deprecated and will " + "be removed in a future version, use decode_dss_signature instead.", + utils.PersistentlyDeprecated, + stacklevel=2 + ) + return decode_dss_signature(signature) + + +def decode_dss_signature(signature): + data = DSASignature.load(signature, strict=True).native + return data['r'], data['s'] + + +def encode_rfc6979_signature(r, s): + warnings.warn( + "encode_rfc6979_signature is deprecated and will " + "be removed in a future version, use encode_dss_signature instead.", + utils.PersistentlyDeprecated, + stacklevel=2 + ) + return encode_dss_signature(r, s) + + +def encode_dss_signature(r, s): + if ( + not isinstance(r, six.integer_types) or + not isinstance(s, six.integer_types) + ): + raise ValueError("Both r and s must be integers") + + return DSASignature({'r': r, 's': s}).dump() + + +class Prehashed(object): + def __init__(self, algorithm): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of HashAlgorithm.") + + self._algorithm = algorithm + self._digest_size = algorithm.digest_size + + digest_size = utils.read_only_property("_digest_size") diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/x25519.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/x25519.py new file mode 100644 index 0000000..5c4652a --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/asymmetric/x25519.py @@ -0,0 +1,54 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
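A minimal sketch covering the rsa module and the asymmetric utils helpers above, assuming the default OpenSSL backend:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa, utils

private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                                       backend=default_backend())
pss = padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
                  salt_length=padding.PSS.MAX_LENGTH)
signature = private_key.sign(b"message", pss, hashes.SHA256())
private_key.public_key().verify(signature, b"message", pss, hashes.SHA256())

# (p, q) can be recovered from (n, e, d) via the Rabin-style search above
numbers = private_key.private_numbers()
p, q = rsa.rsa_recover_prime_factors(numbers.public_numbers.n,
                                     numbers.public_numbers.e, numbers.d)

# DSA/ECDSA signatures are DER-encoded (r, s) pairs; round-trip the helpers
sig = utils.encode_dss_signature(r=12345, s=67890)
assert utils.decode_dss_signature(sig) == (12345, 67890)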
+ +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons + + +@six.add_metaclass(abc.ABCMeta) +class X25519PublicKey(object): + @classmethod + def from_public_bytes(cls, data): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.x25519_supported(): + raise UnsupportedAlgorithm( + "X25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM + ) + return backend.x25519_load_public_bytes(data) + + @abc.abstractmethod + def public_bytes(self): + pass + + +@six.add_metaclass(abc.ABCMeta) +class X25519PrivateKey(object): + @classmethod + def generate(cls): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.x25519_supported(): + raise UnsupportedAlgorithm( + "X25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM + ) + return backend.x25519_generate_key() + + @classmethod + def _from_private_bytes(cls, data): + from cryptography.hazmat.backends.openssl.backend import backend + return backend.x25519_load_private_bytes(data) + + @abc.abstractmethod + def public_key(self): + pass + + @abc.abstractmethod + def exchange(self, peer_public_key): + pass diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/__init__.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/__init__.py new file mode 100644 index 0000000..171b1c6 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/__init__.py @@ -0,0 +1,21 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography.hazmat.primitives.ciphers.base import ( + AEADCipherContext, AEADDecryptionContext, AEADEncryptionContext, + BlockCipherAlgorithm, Cipher, CipherAlgorithm, CipherContext +) + + +__all__ = [ + "Cipher", + "CipherAlgorithm", + "BlockCipherAlgorithm", + "CipherContext", + "AEADCipherContext", + "AEADDecryptionContext", + "AEADEncryptionContext", +] diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/aead.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/aead.py new file mode 100644 index 0000000..e519765 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/aead.py @@ -0,0 +1,188 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
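A minimal sketch for the x25519 module above; as the code shows, it assumes the linked OpenSSL supports X25519 (1.1.0+), otherwise UnsupportedAlgorithm is raised:

from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey

private_key = X25519PrivateKey.generate()
peer_key = X25519PrivateKey.generate()  # normally the other party's key
shared_secret = private_key.exchange(peer_key.public_key())  # 32 bytes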
+ +from __future__ import absolute_import, division, print_function + +import os + +from cryptography import exceptions, utils +from cryptography.hazmat.backends.openssl import aead +from cryptography.hazmat.backends.openssl.backend import backend + + +class ChaCha20Poly1305(object): + _MAX_SIZE = 2 ** 32 + + def __init__(self, key): + if not backend.aead_cipher_supported(self): + raise exceptions.UnsupportedAlgorithm( + "ChaCha20Poly1305 is not supported by this version of OpenSSL", + exceptions._Reasons.UNSUPPORTED_CIPHER + ) + utils._check_bytes("key", key) + + if len(key) != 32: + raise ValueError("ChaCha20Poly1305 key must be 32 bytes.") + + self._key = key + + @classmethod + def generate_key(cls): + return os.urandom(32) + + def encrypt(self, nonce, data, associated_data): + if associated_data is None: + associated_data = b"" + + if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE: + # This is OverflowError to match what cffi would raise + raise OverflowError( + "Data or associated data too long. Max 2**32 bytes" + ) + + self._check_params(nonce, data, associated_data) + return aead._encrypt( + backend, self, nonce, data, associated_data, 16 + ) + + def decrypt(self, nonce, data, associated_data): + if associated_data is None: + associated_data = b"" + + self._check_params(nonce, data, associated_data) + return aead._decrypt( + backend, self, nonce, data, associated_data, 16 + ) + + def _check_params(self, nonce, data, associated_data): + utils._check_bytes("nonce", nonce) + utils._check_bytes("data", data) + utils._check_bytes("associated_data", associated_data) + if len(nonce) != 12: + raise ValueError("Nonce must be 12 bytes") + + +class AESCCM(object): + _MAX_SIZE = 2 ** 32 + + def __init__(self, key, tag_length=16): + utils._check_bytes("key", key) + if len(key) not in (16, 24, 32): + raise ValueError("AESCCM key must be 128, 192, or 256 bits.") + + self._key = key + if not isinstance(tag_length, int): + raise TypeError("tag_length must be an integer") + + if tag_length not in (4, 6, 8, 12, 14, 16): + raise ValueError("Invalid tag_length") + + self._tag_length = tag_length + + if not backend.aead_cipher_supported(self): + raise exceptions.UnsupportedAlgorithm( + "AESCCM is not supported by this version of OpenSSL", + exceptions._Reasons.UNSUPPORTED_CIPHER + ) + + @classmethod + def generate_key(cls, bit_length): + if not isinstance(bit_length, int): + raise TypeError("bit_length must be an integer") + + if bit_length not in (128, 192, 256): + raise ValueError("bit_length must be 128, 192, or 256") + + return os.urandom(bit_length // 8) + + def encrypt(self, nonce, data, associated_data): + if associated_data is None: + associated_data = b"" + + if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE: + # This is OverflowError to match what cffi would raise + raise OverflowError( + "Data or associated data too long. 
Max 2**32 bytes" + ) + + self._check_params(nonce, data, associated_data) + self._validate_lengths(nonce, len(data)) + return aead._encrypt( + backend, self, nonce, data, associated_data, self._tag_length + ) + + def decrypt(self, nonce, data, associated_data): + if associated_data is None: + associated_data = b"" + + self._check_params(nonce, data, associated_data) + return aead._decrypt( + backend, self, nonce, data, associated_data, self._tag_length + ) + + def _validate_lengths(self, nonce, data_len): + # For information about computing this, see + # https://tools.ietf.org/html/rfc3610#section-2.1 + l_val = 15 - len(nonce) + if 2 ** (8 * l_val) < data_len: + raise ValueError("Nonce too long for data") + + def _check_params(self, nonce, data, associated_data): + utils._check_bytes("nonce", nonce) + utils._check_bytes("data", data) + utils._check_bytes("associated_data", associated_data) + if not 7 <= len(nonce) <= 13: + raise ValueError("Nonce must be between 7 and 13 bytes") + + +class AESGCM(object): + _MAX_SIZE = 2 ** 32 + + def __init__(self, key): + utils._check_bytes("key", key) + if len(key) not in (16, 24, 32): + raise ValueError("AESGCM key must be 128, 192, or 256 bits.") + + self._key = key + + @classmethod + def generate_key(cls, bit_length): + if not isinstance(bit_length, int): + raise TypeError("bit_length must be an integer") + + if bit_length not in (128, 192, 256): + raise ValueError("bit_length must be 128, 192, or 256") + + return os.urandom(bit_length // 8) + + def encrypt(self, nonce, data, associated_data): + if associated_data is None: + associated_data = b"" + + if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE: + # This is OverflowError to match what cffi would raise + raise OverflowError( + "Data or associated data too long. Max 2**32 bytes" + ) + + self._check_params(nonce, data, associated_data) + return aead._encrypt( + backend, self, nonce, data, associated_data, 16 + ) + + def decrypt(self, nonce, data, associated_data): + if associated_data is None: + associated_data = b"" + + self._check_params(nonce, data, associated_data) + return aead._decrypt( + backend, self, nonce, data, associated_data, 16 + ) + + def _check_params(self, nonce, data, associated_data): + utils._check_bytes("nonce", nonce) + utils._check_bytes("data", data) + utils._check_bytes("associated_data", associated_data) + if len(nonce) == 0: + raise ValueError("Nonce must be at least 1 byte") diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/algorithms.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/algorithms.py new file mode 100644 index 0000000..68a9e33 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/algorithms.py @@ -0,0 +1,168 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
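A minimal sketch for the aead module above; AESCCM and ChaCha20Poly1305 follow the same encrypt/decrypt shape, and decrypt raises InvalidTag if the ciphertext or associated data was tampered with:

import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

key = AESGCM.generate_key(bit_length=128)
aesgcm = AESGCM(key)
nonce = os.urandom(12)  # 96-bit nonces are the conventional choice for GCM
ciphertext = aesgcm.encrypt(nonce, b"plaintext", b"associated data")
assert aesgcm.decrypt(nonce, ciphertext, b"associated data") == b"plaintext"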
+ +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.hazmat.primitives.ciphers import ( + BlockCipherAlgorithm, CipherAlgorithm +) +from cryptography.hazmat.primitives.ciphers.modes import ModeWithNonce + + +def _verify_key_size(algorithm, key): + # Verify that the key is instance of bytes + utils._check_bytes("key", key) + + # Verify that the key size matches the expected key size + if len(key) * 8 not in algorithm.key_sizes: + raise ValueError("Invalid key size ({0}) for {1}.".format( + len(key) * 8, algorithm.name + )) + return key + + +@utils.register_interface(BlockCipherAlgorithm) +@utils.register_interface(CipherAlgorithm) +class AES(object): + name = "AES" + block_size = 128 + # 512 added to support AES-256-XTS, which uses 512-bit keys + key_sizes = frozenset([128, 192, 256, 512]) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(BlockCipherAlgorithm) +@utils.register_interface(CipherAlgorithm) +class Camellia(object): + name = "camellia" + block_size = 128 + key_sizes = frozenset([128, 192, 256]) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(BlockCipherAlgorithm) +@utils.register_interface(CipherAlgorithm) +class TripleDES(object): + name = "3DES" + block_size = 64 + key_sizes = frozenset([64, 128, 192]) + + def __init__(self, key): + if len(key) == 8: + key += key + key + elif len(key) == 16: + key += key[:8] + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(BlockCipherAlgorithm) +@utils.register_interface(CipherAlgorithm) +class Blowfish(object): + name = "Blowfish" + block_size = 64 + key_sizes = frozenset(range(32, 449, 8)) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(BlockCipherAlgorithm) +@utils.register_interface(CipherAlgorithm) +class CAST5(object): + name = "CAST5" + block_size = 64 + key_sizes = frozenset(range(40, 129, 8)) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(CipherAlgorithm) +class ARC4(object): + name = "RC4" + key_sizes = frozenset([40, 56, 64, 80, 128, 160, 192, 256]) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(CipherAlgorithm) +class IDEA(object): + name = "IDEA" + block_size = 64 + key_sizes = frozenset([128]) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(BlockCipherAlgorithm) +@utils.register_interface(CipherAlgorithm) +class SEED(object): + name = "SEED" + block_size = 128 + key_sizes = frozenset([128]) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(CipherAlgorithm) +@utils.register_interface(ModeWithNonce) +class ChaCha20(object): + name = "ChaCha20" + key_sizes = frozenset([256]) + + def __init__(self, key, nonce): + self.key = _verify_key_size(self, key) + if not isinstance(nonce, bytes): + 
raise TypeError("nonce must be bytes") + + if len(nonce) != 16: + raise ValueError("nonce must be 128-bits (16 bytes)") + + self._nonce = nonce + + nonce = utils.read_only_property("_nonce") + + @property + def key_size(self): + return len(self.key) * 8 diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/base.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/base.py new file mode 100644 index 0000000..f857041 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/base.py @@ -0,0 +1,235 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, AlreadyUpdated, NotYetFinalized, UnsupportedAlgorithm, + _Reasons +) +from cryptography.hazmat.backends.interfaces import CipherBackend +from cryptography.hazmat.primitives.ciphers import modes + + +@six.add_metaclass(abc.ABCMeta) +class CipherAlgorithm(object): + @abc.abstractproperty + def name(self): + """ + A string naming this mode (e.g. "AES", "Camellia"). + """ + + @abc.abstractproperty + def key_size(self): + """ + The size of the key being used as an integer in bits (e.g. 128, 256). + """ + + +@six.add_metaclass(abc.ABCMeta) +class BlockCipherAlgorithm(object): + @abc.abstractproperty + def block_size(self): + """ + The size of a block as an integer in bits (e.g. 64, 128). + """ + + +@six.add_metaclass(abc.ABCMeta) +class CipherContext(object): + @abc.abstractmethod + def update(self, data): + """ + Processes the provided bytes through the cipher and returns the results + as bytes. + """ + + @abc.abstractmethod + def update_into(self, data, buf): + """ + Processes the provided bytes and writes the resulting data into the + provided buffer. Returns the number of bytes written. + """ + + @abc.abstractmethod + def finalize(self): + """ + Returns the results of processing the final block as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class AEADCipherContext(object): + @abc.abstractmethod + def authenticate_additional_data(self, data): + """ + Authenticates the provided bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class AEADDecryptionContext(object): + @abc.abstractmethod + def finalize_with_tag(self, tag): + """ + Returns the results of processing the final block as bytes and allows + delayed passing of the authentication tag. + """ + + +@six.add_metaclass(abc.ABCMeta) +class AEADEncryptionContext(object): + @abc.abstractproperty + def tag(self): + """ + Returns tag bytes. This is only available after encryption is + finalized. + """ + + +class Cipher(object): + def __init__(self, algorithm, mode, backend): + if not isinstance(backend, CipherBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement CipherBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + if not isinstance(algorithm, CipherAlgorithm): + raise TypeError("Expected interface of CipherAlgorithm.") + + if mode is not None: + mode.validate_for_algorithm(algorithm) + + self.algorithm = algorithm + self.mode = mode + self._backend = backend + + def encryptor(self): + if isinstance(self.mode, modes.ModeWithAuthenticationTag): + if self.mode.tag is not None: + raise ValueError( + "Authentication tag must be None when encrypting." 
+ ) + ctx = self._backend.create_symmetric_encryption_ctx( + self.algorithm, self.mode + ) + return self._wrap_ctx(ctx, encrypt=True) + + def decryptor(self): + ctx = self._backend.create_symmetric_decryption_ctx( + self.algorithm, self.mode + ) + return self._wrap_ctx(ctx, encrypt=False) + + def _wrap_ctx(self, ctx, encrypt): + if isinstance(self.mode, modes.ModeWithAuthenticationTag): + if encrypt: + return _AEADEncryptionContext(ctx) + else: + return _AEADCipherContext(ctx) + else: + return _CipherContext(ctx) + + +@utils.register_interface(CipherContext) +class _CipherContext(object): + def __init__(self, ctx): + self._ctx = ctx + + def update(self, data): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return self._ctx.update(data) + + def update_into(self, data, buf): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return self._ctx.update_into(data, buf) + + def finalize(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + data = self._ctx.finalize() + self._ctx = None + return data + + +@utils.register_interface(AEADCipherContext) +@utils.register_interface(CipherContext) +@utils.register_interface(AEADDecryptionContext) +class _AEADCipherContext(object): + def __init__(self, ctx): + self._ctx = ctx + self._bytes_processed = 0 + self._aad_bytes_processed = 0 + self._tag = None + self._updated = False + + def _check_limit(self, data_size): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + self._updated = True + self._bytes_processed += data_size + if self._bytes_processed > self._ctx._mode._MAX_ENCRYPTED_BYTES: + raise ValueError( + "{0} has a maximum encrypted byte limit of {1}".format( + self._ctx._mode.name, self._ctx._mode._MAX_ENCRYPTED_BYTES + ) + ) + + def update(self, data): + self._check_limit(len(data)) + return self._ctx.update(data) + + def update_into(self, data, buf): + self._check_limit(len(data)) + return self._ctx.update_into(data, buf) + + def finalize(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + data = self._ctx.finalize() + self._tag = self._ctx.tag + self._ctx = None + return data + + def finalize_with_tag(self, tag): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + data = self._ctx.finalize_with_tag(tag) + self._tag = self._ctx.tag + self._ctx = None + return data + + def authenticate_additional_data(self, data): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + if self._updated: + raise AlreadyUpdated("Update has been called on this context.") + + self._aad_bytes_processed += len(data) + if self._aad_bytes_processed > self._ctx._mode._MAX_AAD_BYTES: + raise ValueError( + "{0} has a maximum AAD byte limit of {1}".format( + self._ctx._mode.name, self._ctx._mode._MAX_AAD_BYTES + ) + ) + + self._ctx.authenticate_additional_data(data) + + +@utils.register_interface(AEADEncryptionContext) +class _AEADEncryptionContext(_AEADCipherContext): + @property + def tag(self): + if self._ctx is not None: + raise NotYetFinalized("You must finalize encryption before " + "getting the tag.") + return self._tag diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/modes.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/modes.py new file mode 100644 index 0000000..e82c1a8 --- /dev/null +++ 
b/server/www/packages/packages-common/cryptography/hazmat/primitives/ciphers/modes.py @@ -0,0 +1,231 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils + + +@six.add_metaclass(abc.ABCMeta) +class Mode(object): + @abc.abstractproperty + def name(self): + """ + A string naming this mode (e.g. "ECB", "CBC"). + """ + + @abc.abstractmethod + def validate_for_algorithm(self, algorithm): + """ + Checks that all the necessary invariants of this (mode, algorithm) + combination are met. + """ + + +@six.add_metaclass(abc.ABCMeta) +class ModeWithInitializationVector(object): + @abc.abstractproperty + def initialization_vector(self): + """ + The value of the initialization vector for this mode as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class ModeWithTweak(object): + @abc.abstractproperty + def tweak(self): + """ + The value of the tweak for this mode as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class ModeWithNonce(object): + @abc.abstractproperty + def nonce(self): + """ + The value of the nonce for this mode as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class ModeWithAuthenticationTag(object): + @abc.abstractproperty + def tag(self): + """ + The value of the tag supplied to the constructor of this mode. + """ + + +def _check_aes_key_length(self, algorithm): + if algorithm.key_size > 256 and algorithm.name == "AES": + raise ValueError( + "Only 128, 192, and 256 bit keys are allowed for this AES mode" + ) + + +def _check_iv_length(self, algorithm): + if len(self.initialization_vector) * 8 != algorithm.block_size: + raise ValueError("Invalid IV size ({0}) for {1}.".format( + len(self.initialization_vector), self.name + )) + + +def _check_iv_and_key_length(self, algorithm): + _check_aes_key_length(self, algorithm) + _check_iv_length(self, algorithm) + + +@utils.register_interface(Mode) +@utils.register_interface(ModeWithInitializationVector) +class CBC(object): + name = "CBC" + + def __init__(self, initialization_vector): + if not isinstance(initialization_vector, bytes): + raise TypeError("initialization_vector must be bytes") + + self._initialization_vector = initialization_vector + + initialization_vector = utils.read_only_property("_initialization_vector") + validate_for_algorithm = _check_iv_and_key_length + + +@utils.register_interface(Mode) +@utils.register_interface(ModeWithTweak) +class XTS(object): + name = "XTS" + + def __init__(self, tweak): + if not isinstance(tweak, bytes): + raise TypeError("tweak must be bytes") + + if len(tweak) != 16: + raise ValueError("tweak must be 128-bits (16 bytes)") + + self._tweak = tweak + + tweak = utils.read_only_property("_tweak") + + def validate_for_algorithm(self, algorithm): + if algorithm.key_size not in (256, 512): + raise ValueError( + "The XTS specification requires a 256-bit key for AES-128-XTS" + " and 512-bit key for AES-256-XTS" + ) + + +@utils.register_interface(Mode) +class ECB(object): + name = "ECB" + + validate_for_algorithm = _check_aes_key_length + + +@utils.register_interface(Mode) +@utils.register_interface(ModeWithInitializationVector) +class OFB(object): + name = "OFB" + + def __init__(self, initialization_vector): + if not isinstance(initialization_vector, bytes): + raise TypeError("initialization_vector must be bytes") + + 
self._initialization_vector = initialization_vector
+
+    initialization_vector = utils.read_only_property("_initialization_vector")
+    validate_for_algorithm = _check_iv_and_key_length
+
+
+@utils.register_interface(Mode)
+@utils.register_interface(ModeWithInitializationVector)
+class CFB(object):
+    name = "CFB"
+
+    def __init__(self, initialization_vector):
+        if not isinstance(initialization_vector, bytes):
+            raise TypeError("initialization_vector must be bytes")
+
+        self._initialization_vector = initialization_vector
+
+    initialization_vector = utils.read_only_property("_initialization_vector")
+    validate_for_algorithm = _check_iv_and_key_length
+
+
+@utils.register_interface(Mode)
+@utils.register_interface(ModeWithInitializationVector)
+class CFB8(object):
+    name = "CFB8"
+
+    def __init__(self, initialization_vector):
+        if not isinstance(initialization_vector, bytes):
+            raise TypeError("initialization_vector must be bytes")
+
+        self._initialization_vector = initialization_vector
+
+    initialization_vector = utils.read_only_property("_initialization_vector")
+    validate_for_algorithm = _check_iv_and_key_length
+
+
+@utils.register_interface(Mode)
+@utils.register_interface(ModeWithNonce)
+class CTR(object):
+    name = "CTR"
+
+    def __init__(self, nonce):
+        if not isinstance(nonce, bytes):
+            raise TypeError("nonce must be bytes")
+
+        self._nonce = nonce
+
+    nonce = utils.read_only_property("_nonce")
+
+    def validate_for_algorithm(self, algorithm):
+        _check_aes_key_length(self, algorithm)
+        if len(self.nonce) * 8 != algorithm.block_size:
+            raise ValueError("Invalid nonce size ({0}) for {1}.".format(
+                len(self.nonce), self.name
+            ))
+
+
+@utils.register_interface(Mode)
+@utils.register_interface(ModeWithInitializationVector)
+@utils.register_interface(ModeWithAuthenticationTag)
+class GCM(object):
+    name = "GCM"
+    _MAX_ENCRYPTED_BYTES = (2 ** 39 - 256) // 8
+    _MAX_AAD_BYTES = (2 ** 64) // 8
+
+    def __init__(self, initialization_vector, tag=None, min_tag_length=16):
+        # len(initialization_vector) must be in [1, 2 ** 64), but it's
+        # impossible to actually construct a bytes object that large, so we
+        # don't check for it
+        if not isinstance(initialization_vector, bytes):
+            raise TypeError("initialization_vector must be bytes")
+        if len(initialization_vector) == 0:
+            raise ValueError("initialization_vector must be at least 1 byte")
+        self._initialization_vector = initialization_vector
+        if tag is not None:
+            if not isinstance(tag, bytes):
+                raise TypeError("tag must be bytes or None")
+            if min_tag_length < 4:
+                raise ValueError("min_tag_length must be >= 4")
+            if len(tag) < min_tag_length:
+                raise ValueError(
+                    "Authentication tag must be {0} bytes or longer.".format(
+                        min_tag_length)
+                )
+        self._tag = tag
+        self._min_tag_length = min_tag_length
+
+    tag = utils.read_only_property("_tag")
+    initialization_vector = utils.read_only_property("_initialization_vector")
+
+    def validate_for_algorithm(self, algorithm):
+        _check_aes_key_length(self, algorithm)
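These mode objects only carry and validate parameters; the actual work goes through Cipher and the context classes earlier in this diff. A minimal AES-GCM round-trip sketch under that API, assuming the vendored package is importable as cryptography (the key, nonce, and messages are throwaway illustrative values):

import os

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key = os.urandom(32)   # AES-256 key
iv = os.urandom(12)    # 96-bit nonce, the commonly recommended GCM IV size

# Encrypt: AAD must be supplied before update(), and the tag only
# becomes readable on the context after finalize().
encryptor = Cipher(algorithms.AES(key), modes.GCM(iv), default_backend()).encryptor()
encryptor.authenticate_additional_data(b"header")
ciphertext = encryptor.update(b"secret message") + encryptor.finalize()
tag = encryptor.tag

# Decrypt: passing the tag to GCM makes finalize() verify it and
# raise InvalidTag on mismatch.
decryptor = Cipher(algorithms.AES(key), modes.GCM(iv, tag), default_backend()).decryptor()
decryptor.authenticate_additional_data(b"header")
plaintext = decryptor.update(ciphertext) + decryptor.finalize()
assert plaintext == b"secret message"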
diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/cmac.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/cmac.py
new file mode 100644
index 0000000..77537f0
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/cmac.py
@@ -0,0 +1,66 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+from cryptography import utils
+from cryptography.exceptions import (
+    AlreadyFinalized, UnsupportedAlgorithm, _Reasons
+)
+from cryptography.hazmat.backends.interfaces import CMACBackend
+from cryptography.hazmat.primitives import ciphers, mac
+
+
+@utils.register_interface(mac.MACContext)
+class CMAC(object):
+    def __init__(self, algorithm, backend, ctx=None):
+        if not isinstance(backend, CMACBackend):
+            raise UnsupportedAlgorithm(
+                "Backend object does not implement CMACBackend.",
+                _Reasons.BACKEND_MISSING_INTERFACE
+            )
+
+        if not isinstance(algorithm, ciphers.BlockCipherAlgorithm):
+            raise TypeError(
+                "Expected instance of BlockCipherAlgorithm."
+            )
+        self._algorithm = algorithm
+
+        self._backend = backend
+        if ctx is None:
+            self._ctx = self._backend.create_cmac_ctx(self._algorithm)
+        else:
+            self._ctx = ctx
+
+    def update(self, data):
+        if self._ctx is None:
+            raise AlreadyFinalized("Context was already finalized.")
+        if not isinstance(data, bytes):
+            raise TypeError("data must be bytes.")
+        self._ctx.update(data)
+
+    def finalize(self):
+        if self._ctx is None:
+            raise AlreadyFinalized("Context was already finalized.")
+        digest = self._ctx.finalize()
+        self._ctx = None
+        return digest
+
+    def verify(self, signature):
+        if not isinstance(signature, bytes):
+            raise TypeError("signature must be bytes.")
+        if self._ctx is None:
+            raise AlreadyFinalized("Context was already finalized.")
+
+        ctx, self._ctx = self._ctx, None
+        ctx.verify(signature)
+
+    def copy(self):
+        if self._ctx is None:
+            raise AlreadyFinalized("Context was already finalized.")
+        return CMAC(
+            self._algorithm,
+            backend=self._backend,
+            ctx=self._ctx.copy()
+        )
diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/constant_time.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/constant_time.py
new file mode 100644
index 0000000..0e987ea
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/constant_time.py
@@ -0,0 +1,35 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import hmac
+import warnings
+
+from cryptography import utils
+from cryptography.hazmat.bindings._constant_time import lib
+
+
+if hasattr(hmac, "compare_digest"):
+    def bytes_eq(a, b):
+        if not isinstance(a, bytes) or not isinstance(b, bytes):
+            raise TypeError("a and b must be bytes.")
+
+        return hmac.compare_digest(a, b)
+
+else:
+    warnings.warn(
+        "Support for your Python version is deprecated. The next version of "
+        "cryptography will remove support. 
Please upgrade to a 2.7.x " + "release that supports hmac.compare_digest as soon as possible.", + utils.DeprecatedIn23, + ) + + def bytes_eq(a, b): + if not isinstance(a, bytes) or not isinstance(b, bytes): + raise TypeError("a and b must be bytes.") + + return lib.Cryptography_constant_time_bytes_eq( + a, len(a), b, len(b) + ) == 1 diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/hashes.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/hashes.py new file mode 100644 index 0000000..3f3aadd --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/hashes.py @@ -0,0 +1,185 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import HashBackend + + +@six.add_metaclass(abc.ABCMeta) +class HashAlgorithm(object): + @abc.abstractproperty + def name(self): + """ + A string naming this algorithm (e.g. "sha256", "md5"). + """ + + @abc.abstractproperty + def digest_size(self): + """ + The size of the resulting digest in bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class HashContext(object): + @abc.abstractproperty + def algorithm(self): + """ + A HashAlgorithm that will be used by this context. + """ + + @abc.abstractmethod + def update(self, data): + """ + Processes the provided bytes through the hash. + """ + + @abc.abstractmethod + def finalize(self): + """ + Finalizes the hash context and returns the hash digest as bytes. + """ + + @abc.abstractmethod + def copy(self): + """ + Return a HashContext that is a copy of the current context. 
+ """ + + +@utils.register_interface(HashContext) +class Hash(object): + def __init__(self, algorithm, backend, ctx=None): + if not isinstance(backend, HashBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement HashBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + if not isinstance(algorithm, HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + self._algorithm = algorithm + + self._backend = backend + + if ctx is None: + self._ctx = self._backend.create_hash_ctx(self.algorithm) + else: + self._ctx = ctx + + algorithm = utils.read_only_property("_algorithm") + + def update(self, data): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + if not isinstance(data, bytes): + raise TypeError("data must be bytes.") + self._ctx.update(data) + + def copy(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return Hash( + self.algorithm, backend=self._backend, ctx=self._ctx.copy() + ) + + def finalize(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + digest = self._ctx.finalize() + self._ctx = None + return digest + + +@utils.register_interface(HashAlgorithm) +class SHA1(object): + name = "sha1" + digest_size = 20 + block_size = 64 + + +@utils.register_interface(HashAlgorithm) +class SHA224(object): + name = "sha224" + digest_size = 28 + block_size = 64 + + +@utils.register_interface(HashAlgorithm) +class SHA256(object): + name = "sha256" + digest_size = 32 + block_size = 64 + + +@utils.register_interface(HashAlgorithm) +class SHA384(object): + name = "sha384" + digest_size = 48 + block_size = 128 + + +@utils.register_interface(HashAlgorithm) +class SHA512(object): + name = "sha512" + digest_size = 64 + block_size = 128 + + +@utils.register_interface(HashAlgorithm) +class MD5(object): + name = "md5" + digest_size = 16 + block_size = 64 + + +@utils.register_interface(HashAlgorithm) +class BLAKE2b(object): + name = "blake2b" + _max_digest_size = 64 + _min_digest_size = 1 + block_size = 128 + + def __init__(self, digest_size): + if ( + digest_size > self._max_digest_size or + digest_size < self._min_digest_size + ): + raise ValueError("Digest size must be {0}-{1}".format( + self._min_digest_size, self._max_digest_size) + ) + + self._digest_size = digest_size + + digest_size = utils.read_only_property("_digest_size") + + +@utils.register_interface(HashAlgorithm) +class BLAKE2s(object): + name = "blake2s" + block_size = 64 + _max_digest_size = 32 + _min_digest_size = 1 + + def __init__(self, digest_size): + if ( + digest_size > self._max_digest_size or + digest_size < self._min_digest_size + ): + raise ValueError("Digest size must be {0}-{1}".format( + self._min_digest_size, self._max_digest_size) + ) + + self._digest_size = digest_size + + digest_size = utils.read_only_property("_digest_size") diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/hmac.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/hmac.py new file mode 100644 index 0000000..2e9a4e2 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/hmac.py @@ -0,0 +1,69 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import HMACBackend +from cryptography.hazmat.primitives import hashes, mac + + +@utils.register_interface(mac.MACContext) +@utils.register_interface(hashes.HashContext) +class HMAC(object): + def __init__(self, key, algorithm, backend, ctx=None): + if not isinstance(backend, HMACBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement HMACBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + self._algorithm = algorithm + + self._backend = backend + self._key = key + if ctx is None: + self._ctx = self._backend.create_hmac_ctx(key, self.algorithm) + else: + self._ctx = ctx + + algorithm = utils.read_only_property("_algorithm") + + def update(self, data): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + if not isinstance(data, bytes): + raise TypeError("data must be bytes.") + self._ctx.update(data) + + def copy(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return HMAC( + self._key, + self.algorithm, + backend=self._backend, + ctx=self._ctx.copy() + ) + + def finalize(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + digest = self._ctx.finalize() + self._ctx = None + return digest + + def verify(self, signature): + if not isinstance(signature, bytes): + raise TypeError("signature must be bytes.") + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + + ctx, self._ctx = self._ctx, None + ctx.verify(signature) diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/__init__.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/__init__.py new file mode 100644 index 0000000..2d0724e --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/__init__.py @@ -0,0 +1,26 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class KeyDerivationFunction(object): + @abc.abstractmethod + def derive(self, key_material): + """ + Deterministically generates and returns a new key based on the existing + key material. + """ + + @abc.abstractmethod + def verify(self, key_material, expected_key): + """ + Checks whether the key generated by the key material matches the + expected derived key. Raises an exception if they do not match. + """ diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/concatkdf.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/concatkdf.py new file mode 100644 index 0000000..c6399e4 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/concatkdf.py @@ -0,0 +1,125 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+
+from __future__ import absolute_import, division, print_function
+
+import struct
+
+from cryptography import utils
+from cryptography.exceptions import (
+    AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
+)
+from cryptography.hazmat.backends.interfaces import HMACBackend
+from cryptography.hazmat.backends.interfaces import HashBackend
+from cryptography.hazmat.primitives import constant_time, hashes, hmac
+from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
+
+
+def _int_to_u32be(n):
+    return struct.pack('>I', n)
+
+
+def _common_args_checks(algorithm, length, otherinfo):
+    max_length = algorithm.digest_size * (2 ** 32 - 1)
+    if length > max_length:
+        raise ValueError(
+            "Can not derive keys larger than {0} bytes.".format(
+                max_length
+            ))
+    if not (otherinfo is None or isinstance(otherinfo, bytes)):
+        raise TypeError("otherinfo must be bytes.")
+
+
+def _concatkdf_derive(key_material, length, auxfn, otherinfo):
+    if not isinstance(key_material, bytes):
+        raise TypeError("key_material must be bytes.")
+
+    output = [b""]
+    outlen = 0
+    counter = 1
+
+    while (length > outlen):
+        h = auxfn()
+        h.update(_int_to_u32be(counter))
+        h.update(key_material)
+        h.update(otherinfo)
+        output.append(h.finalize())
+        outlen += len(output[-1])
+        counter += 1
+
+    return b"".join(output)[:length]
+
+
+@utils.register_interface(KeyDerivationFunction)
+class ConcatKDFHash(object):
+    def __init__(self, algorithm, length, otherinfo, backend):
+
+        _common_args_checks(algorithm, length, otherinfo)
+        self._algorithm = algorithm
+        self._length = length
+        self._otherinfo = otherinfo
+        if self._otherinfo is None:
+            self._otherinfo = b""
+
+        if not isinstance(backend, HashBackend):
+            raise UnsupportedAlgorithm(
+                "Backend object does not implement HashBackend.",
+                _Reasons.BACKEND_MISSING_INTERFACE
+            )
+        self._backend = backend
+        self._used = False
+
+    def _hash(self):
+        return hashes.Hash(self._algorithm, self._backend)
+
+    def derive(self, key_material):
+        if self._used:
+            raise AlreadyFinalized
+        self._used = True
+        return _concatkdf_derive(key_material, self._length,
+                                 self._hash, self._otherinfo)
+
+    def verify(self, key_material, expected_key):
+        if not constant_time.bytes_eq(self.derive(key_material), expected_key):
+            raise InvalidKey
+
+
+@utils.register_interface(KeyDerivationFunction)
+class ConcatKDFHMAC(object):
+    def __init__(self, algorithm, length, salt, otherinfo, backend):
+
+        _common_args_checks(algorithm, length, otherinfo)
+        self._algorithm = algorithm
+        self._length = length
+        self._otherinfo = otherinfo
+        if self._otherinfo is None:
+            self._otherinfo = b""
+
+        if not (salt is None or isinstance(salt, bytes)):
+            raise TypeError("salt must be bytes.")
+        if salt is None:
+            salt = b"\x00" * algorithm.block_size
+        self._salt = salt
+
+        if not isinstance(backend, HMACBackend):
+            raise UnsupportedAlgorithm(
+                "Backend object does not implement HMACBackend.",
+                _Reasons.BACKEND_MISSING_INTERFACE
+            )
+        self._backend = backend
+        self._used = False
+
+    def _hmac(self):
+        return hmac.HMAC(self._salt, self._algorithm, self._backend)
+
+    def derive(self, key_material):
+        if self._used:
+            raise AlreadyFinalized
+        self._used = True
+        return _concatkdf_derive(key_material, self._length,
+                                 self._hmac, self._otherinfo)
+
+    def verify(self, key_material, expected_key):
+        if not constant_time.bytes_eq(self.derive(key_material), expected_key):
+            raise InvalidKey
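Like the other KDFs in this package, ConcatKDF instances are single-use: derive() or verify() may be called once. A minimal sketch of the hash variant (the otherinfo value and random input are illustrative placeholders):

import os

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.concatkdf import ConcatKDFHash

shared = os.urandom(32)  # stand-in for e.g. an ECDH shared secret

ckdf = ConcatKDFHash(
    algorithm=hashes.SHA256(),
    length=32,
    otherinfo=b"concatkdf-example",
    backend=default_backend(),
)
key = ckdf.derive(shared)  # a second derive() would raise AlreadyFinalized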
diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/hkdf.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/hkdf.py
new file mode 100644
index 0000000..917b4e9
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/hkdf.py
@@ -0,0 +1,116 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import six
+
+from cryptography import utils
+from cryptography.exceptions import (
+    AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
+)
+from cryptography.hazmat.backends.interfaces import HMACBackend
+from cryptography.hazmat.primitives import constant_time, hmac
+from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
+
+
+@utils.register_interface(KeyDerivationFunction)
+class HKDF(object):
+    def __init__(self, algorithm, length, salt, info, backend):
+        if not isinstance(backend, HMACBackend):
+            raise UnsupportedAlgorithm(
+                "Backend object does not implement HMACBackend.",
+                _Reasons.BACKEND_MISSING_INTERFACE
+            )
+
+        self._algorithm = algorithm
+
+        if not (salt is None or isinstance(salt, bytes)):
+            raise TypeError("salt must be bytes.")
+
+        if salt is None:
+            salt = b"\x00" * self._algorithm.digest_size
+
+        self._salt = salt
+
+        self._backend = backend
+
+        self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend)
+
+    def _extract(self, key_material):
+        h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend)
+        h.update(key_material)
+        return h.finalize()
+
+    def derive(self, key_material):
+        if not isinstance(key_material, bytes):
+            raise TypeError("key_material must be bytes.")
+
+        return self._hkdf_expand.derive(self._extract(key_material))
+
+    def verify(self, key_material, expected_key):
+        if not constant_time.bytes_eq(self.derive(key_material), expected_key):
+            raise InvalidKey
+
+
+@utils.register_interface(KeyDerivationFunction)
+class HKDFExpand(object):
+    def __init__(self, algorithm, length, info, backend):
+        if not isinstance(backend, HMACBackend):
+            raise UnsupportedAlgorithm(
+                "Backend object does not implement HMACBackend.",
+                _Reasons.BACKEND_MISSING_INTERFACE
+            )
+
+        self._algorithm = algorithm
+
+        self._backend = backend
+
+        max_length = 255 * algorithm.digest_size
+
+        if length > max_length:
+            raise ValueError(
+                "Can not derive keys larger than {0} octets.".format(
+                    max_length
+                ))
+
+        self._length = length
+
+        if not (info is None or isinstance(info, bytes)):
+            raise TypeError("info must be bytes.")
+
+        if info is None:
+            info = b""
+
+        self._info = info
+
+        self._used = False
+
+    def _expand(self, key_material):
+        output = [b""]
+        counter = 1
+
+        while self._algorithm.digest_size * (len(output) - 1) < self._length:
+            h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)
+            h.update(output[-1])
+            h.update(self._info)
+            h.update(six.int2byte(counter))
+            output.append(h.finalize())
+            counter += 1
+
+        return b"".join(output)[:self._length]
+
+    def derive(self, key_material):
+        if not isinstance(key_material, bytes):
+            raise TypeError("key_material must be bytes.")
+
+        if self._used:
+            raise AlreadyFinalized
+
+        self._used = True
+        return self._expand(key_material)
+
+    def verify(self, key_material, expected_key):
+        if not constant_time.bytes_eq(self.derive(key_material), expected_key):
+            raise InvalidKey
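This is the RFC 5869 extract-then-expand construction: _extract HMACs the input keying material with the salt, and HKDFExpand iterates HMAC over the previous block, the info string, and a one-byte counter. A minimal derive/verify sketch (the salt and info values are illustrative):

import os

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

shared_secret = os.urandom(32)  # stand-in for a key-exchange output

hkdf = HKDF(
    algorithm=hashes.SHA256(),
    length=32,
    salt=None,                  # None becomes a digest-sized block of zeroes
    info=b"handshake data",
    backend=default_backend(),
)
key = hkdf.derive(shared_secret)

# Instances are single-use, so verification needs a fresh object.
HKDF(
    algorithm=hashes.SHA256(), length=32, salt=None,
    info=b"handshake data", backend=default_backend(),
).verify(shared_secret, key)    # raises InvalidKey on mismatch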
diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/kbkdf.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/kbkdf.py
new file mode 100644
index 0000000..14de56e
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/kbkdf.py
@@ -0,0 +1,148 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+from enum import Enum
+
+from six.moves import range
+
+from cryptography import utils
+from cryptography.exceptions import (
+    AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
+)
+from cryptography.hazmat.backends.interfaces import HMACBackend
+from cryptography.hazmat.primitives import constant_time, hashes, hmac
+from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
+
+
+class Mode(Enum):
+    CounterMode = "ctr"
+
+
+class CounterLocation(Enum):
+    BeforeFixed = "before_fixed"
+    AfterFixed = "after_fixed"
+
+
+@utils.register_interface(KeyDerivationFunction)
+class KBKDFHMAC(object):
+    def __init__(self, algorithm, mode, length, rlen, llen,
+                 location, label, context, fixed, backend):
+        if not isinstance(backend, HMACBackend):
+            raise UnsupportedAlgorithm(
+                "Backend object does not implement HMACBackend.",
+                _Reasons.BACKEND_MISSING_INTERFACE
+            )
+
+        if not isinstance(algorithm, hashes.HashAlgorithm):
+            raise UnsupportedAlgorithm(
+                "Algorithm supplied is not a supported hash algorithm.",
+                _Reasons.UNSUPPORTED_HASH
+            )
+
+        if not backend.hmac_supported(algorithm):
+            raise UnsupportedAlgorithm(
+                "Algorithm supplied is not a supported hmac algorithm.",
+                _Reasons.UNSUPPORTED_HASH
+            )
+
+        if not isinstance(mode, Mode):
+            raise TypeError("mode must be of type Mode")
+
+        if not isinstance(location, CounterLocation):
+            raise TypeError("location must be of type CounterLocation")
+
+        if (label or context) and fixed:
+            raise ValueError("When supplying fixed data, "
+                             "label and context are ignored.")
+
+        if rlen is None or not self._valid_byte_length(rlen):
+            raise ValueError("rlen must be between 1 and 4")
+
+        if llen is None and fixed is None:
+            raise ValueError("Please specify an llen")
+
+        if llen is not None and not isinstance(llen, int):
+            raise TypeError("llen must be an integer")
+
+        if label is None:
+            label = b''
+
+        if context is None:
+            context = b''
+
+        if (not isinstance(label, bytes) or
+                not isinstance(context, bytes)):
+            raise TypeError('label and context must be of type bytes')
+
+        self._algorithm = algorithm
+        self._mode = mode
+        self._length = length
+        self._rlen = rlen
+        self._llen = llen
+        self._location = location
+        self._label = label
+        self._context = context
+        self._backend = backend
+        self._used = False
+        self._fixed_data = fixed
+
+    def _valid_byte_length(self, value):
+        if not isinstance(value, int):
+            raise TypeError('value must be of type int')
+
+        value_bin = utils.int_to_bytes(1, value)
+        if not 1 <= len(value_bin) <= 4:
+            return False
+        return True
+
+    def derive(self, key_material):
+        if self._used:
+            raise AlreadyFinalized
+
+        if not isinstance(key_material, bytes):
+            raise TypeError('key_material must be bytes')
+        self._used = True
+
+        # inverse floor division (equivalent to ceiling)
+        rounds = -(-self._length // self._algorithm.digest_size)
+
+        output = [b'']
+
+        # For counter mode, the number of iterations shall not be
+        # larger than 2^r-1, where r <= 32 is the binary length of the counter
+        # This ensures that the counter values used as an input to the
+        # PRF will not repeat during a particular call to the KDF function.
+        r_bin = utils.int_to_bytes(1, self._rlen)
+        if rounds > pow(2, len(r_bin) * 8) - 1:
+            raise ValueError('There are too many iterations.')
+
+        for i in range(1, rounds + 1):
+            h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)
+
+            counter = utils.int_to_bytes(i, self._rlen)
+            if self._location == CounterLocation.BeforeFixed:
+                h.update(counter)
+
+            h.update(self._generate_fixed_input())
+
+            if self._location == CounterLocation.AfterFixed:
+                h.update(counter)
+
+            output.append(h.finalize())
+
+        return b''.join(output)[:self._length]
+
+    def _generate_fixed_input(self):
+        if self._fixed_data and isinstance(self._fixed_data, bytes):
+            return self._fixed_data
+
+        l_val = utils.int_to_bytes(self._length * 8, self._llen)
+
+        return b"".join([self._label, b"\x00", self._context, l_val])
+
+    def verify(self, key_material, expected_key):
+        if not constant_time.bytes_eq(self.derive(key_material), expected_key):
+            raise InvalidKey
diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/pbkdf2.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/pbkdf2.py
new file mode 100644
index 0000000..f8ce7a3
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/pbkdf2.py
@@ -0,0 +1,58 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+from cryptography import utils
+from cryptography.exceptions import (
+    AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
+)
+from cryptography.hazmat.backends.interfaces import PBKDF2HMACBackend
+from cryptography.hazmat.primitives import constant_time
+from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
+
+
+@utils.register_interface(KeyDerivationFunction)
+class PBKDF2HMAC(object):
+    def __init__(self, algorithm, length, salt, iterations, backend):
+        if not isinstance(backend, PBKDF2HMACBackend):
+            raise UnsupportedAlgorithm(
+                "Backend object does not implement PBKDF2HMACBackend.",
+                _Reasons.BACKEND_MISSING_INTERFACE
+            )
+
+        if not backend.pbkdf2_hmac_supported(algorithm):
+            raise UnsupportedAlgorithm(
+                "{0} is not supported for PBKDF2 by this backend.".format(
+                    algorithm.name),
+                _Reasons.UNSUPPORTED_HASH
+            )
+        self._used = False
+        self._algorithm = algorithm
+        self._length = length
+        if not isinstance(salt, bytes):
+            raise TypeError("salt must be bytes.")
+        self._salt = salt
+        self._iterations = iterations
+        self._backend = backend
+
+    def derive(self, key_material):
+        if self._used:
+            raise AlreadyFinalized("PBKDF2 instances can only be used once.")
+        self._used = True
+
+        if not isinstance(key_material, bytes):
+            raise TypeError("key_material must be bytes.")
+        return self._backend.derive_pbkdf2_hmac(
+            self._algorithm,
+            self._length,
+            self._salt,
+            self._iterations,
+            key_material
+        )
+
+    def verify(self, key_material, expected_key):
+        derived_key = self.derive(key_material)
+        if not constant_time.bytes_eq(derived_key, expected_key):
+            raise InvalidKey("Keys do not match.")
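A minimal password-derivation sketch for PBKDF2HMAC (the salt handling and iteration count here are illustrative, not a tuning recommendation):

import os

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

salt = os.urandom(16)  # store alongside the derived key

kdf = PBKDF2HMAC(
    algorithm=hashes.SHA256(),
    length=32,
    salt=salt,
    iterations=100000,
    backend=default_backend(),
)
derived = kdf.derive(b"my great password")

# Instances are single-use, so verification rebuilds the KDF with the
# stored salt and compares in constant time (InvalidKey on mismatch).
PBKDF2HMAC(
    algorithm=hashes.SHA256(), length=32, salt=salt,
    iterations=100000, backend=default_backend(),
).verify(b"my great password", derived)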
diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/scrypt.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/scrypt.py
new file mode 100644
index 0000000..77dcf9a
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/scrypt.py
@@ -0,0 +1,66 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+from cryptography import utils
+from cryptography.exceptions import (
+    AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
+)
+from cryptography.hazmat.backends.interfaces import ScryptBackend
+from cryptography.hazmat.primitives import constant_time
+from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
+
+
+# This is used by the scrypt tests to skip tests that require more memory
+# than the MEM_LIMIT
+_MEM_LIMIT = sys.maxsize // 2
+
+
+@utils.register_interface(KeyDerivationFunction)
+class Scrypt(object):
+    def __init__(self, salt, length, n, r, p, backend):
+        if not isinstance(backend, ScryptBackend):
+            raise UnsupportedAlgorithm(
+                "Backend object does not implement ScryptBackend.",
+                _Reasons.BACKEND_MISSING_INTERFACE
+            )
+
+        self._length = length
+        if not isinstance(salt, bytes):
+            raise TypeError("salt must be bytes.")
+
+        if n < 2 or (n & (n - 1)) != 0:
+            raise ValueError("n must be greater than 1 and be a power of 2.")
+
+        if r < 1:
+            raise ValueError("r must be greater than or equal to 1.")
+
+        if p < 1:
+            raise ValueError("p must be greater than or equal to 1.")
+
+        self._used = False
+        self._salt = salt
+        self._n = n
+        self._r = r
+        self._p = p
+        self._backend = backend
+
+    def derive(self, key_material):
+        if self._used:
+            raise AlreadyFinalized("Scrypt instances can only be used once.")
+        self._used = True
+
+        if not isinstance(key_material, bytes):
+            raise TypeError("key_material must be bytes.")
+        return self._backend.derive_scrypt(
+            key_material, self._salt, self._length, self._n, self._r, self._p
+        )
+
+    def verify(self, key_material, expected_key):
+        derived_key = self.derive(key_material)
+        if not constant_time.bytes_eq(derived_key, expected_key):
+            raise InvalidKey("Keys do not match.")
diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/x963kdf.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/x963kdf.py
new file mode 100644
index 0000000..83789b3
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/kdf/x963kdf.py
@@ -0,0 +1,70 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import struct
+
+from cryptography import utils
+from cryptography.exceptions import (
+    AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
+)
+from cryptography.hazmat.backends.interfaces import HashBackend
+from cryptography.hazmat.primitives import constant_time, hashes
+from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
+
+
+def _int_to_u32be(n):
+    return struct.pack('>I', n)
+
+
+@utils.register_interface(KeyDerivationFunction)
+class X963KDF(object):
+    def __init__(self, algorithm, length, sharedinfo, backend):
+
+        max_len = algorithm.digest_size * (2 ** 32 - 1)
+        if length > max_len:
+            raise ValueError(
+                "Can not derive keys larger than {0} bytes.".format(max_len))
+        if not (sharedinfo is None or isinstance(sharedinfo, bytes)):
+            raise TypeError("sharedinfo must be bytes.")
+        self._algorithm = algorithm
+        self._length = length
+        self._sharedinfo = sharedinfo
+
+        if not isinstance(backend, HashBackend):
+            raise UnsupportedAlgorithm(
+                "Backend object does not implement HashBackend.",
+                _Reasons.BACKEND_MISSING_INTERFACE
+            )
+        self._backend = backend
+        self._used = False
+
+    def derive(self, key_material):
+        if self._used:
+            raise AlreadyFinalized
+        self._used = True
+
+        if not isinstance(key_material, bytes):
+            raise TypeError("key_material must be bytes.")
+
+        output = [b""]
+        outlen = 0
+        counter = 1
+
+        while self._length > outlen:
+            h = hashes.Hash(self._algorithm, self._backend)
+            h.update(key_material)
+            h.update(_int_to_u32be(counter))
+            if self._sharedinfo is not None:
+                h.update(self._sharedinfo)
+            output.append(h.finalize())
+            outlen += len(output[-1])
+            counter += 1
+
+        return b"".join(output)[:self._length]
+
+    def verify(self, key_material, expected_key):
+        if not constant_time.bytes_eq(self.derive(key_material), expected_key):
+            raise InvalidKey
diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/keywrap.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/keywrap.py
new file mode 100644
index 0000000..f55c519
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/keywrap.py
@@ -0,0 +1,154 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+ +from __future__ import absolute_import, division, print_function + +import struct + +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import ECB +from cryptography.hazmat.primitives.constant_time import bytes_eq + + +def _wrap_core(wrapping_key, a, r, backend): + # RFC 3394 Key Wrap - 2.2.1 (index method) + encryptor = Cipher(AES(wrapping_key), ECB(), backend).encryptor() + n = len(r) + for j in range(6): + for i in range(n): + # every encryption operation is a discrete 16 byte chunk (because + # AES has a 128-bit block size) and since we're using ECB it is + # safe to reuse the encryptor for the entire operation + b = encryptor.update(a + r[i]) + # pack/unpack are safe as these are always 64-bit chunks + a = struct.pack( + ">Q", struct.unpack(">Q", b[:8])[0] ^ ((n * j) + i + 1) + ) + r[i] = b[-8:] + + assert encryptor.finalize() == b"" + + return a + b"".join(r) + + +def aes_key_wrap(wrapping_key, key_to_wrap, backend): + if len(wrapping_key) not in [16, 24, 32]: + raise ValueError("The wrapping key must be a valid AES key length") + + if len(key_to_wrap) < 16: + raise ValueError("The key to wrap must be at least 16 bytes") + + if len(key_to_wrap) % 8 != 0: + raise ValueError("The key to wrap must be a multiple of 8 bytes") + + a = b"\xa6\xa6\xa6\xa6\xa6\xa6\xa6\xa6" + r = [key_to_wrap[i:i + 8] for i in range(0, len(key_to_wrap), 8)] + return _wrap_core(wrapping_key, a, r, backend) + + +def _unwrap_core(wrapping_key, a, r, backend): + # Implement RFC 3394 Key Unwrap - 2.2.2 (index method) + decryptor = Cipher(AES(wrapping_key), ECB(), backend).decryptor() + n = len(r) + for j in reversed(range(6)): + for i in reversed(range(n)): + # pack/unpack are safe as these are always 64-bit chunks + atr = struct.pack( + ">Q", struct.unpack(">Q", a)[0] ^ ((n * j) + i + 1) + ) + r[i] + # every decryption operation is a discrete 16 byte chunk so + # it is safe to reuse the decryptor for the entire operation + b = decryptor.update(atr) + a = b[:8] + r[i] = b[-8:] + + assert decryptor.finalize() == b"" + return a, r + + +def aes_key_wrap_with_padding(wrapping_key, key_to_wrap, backend): + if len(wrapping_key) not in [16, 24, 32]: + raise ValueError("The wrapping key must be a valid AES key length") + + aiv = b"\xA6\x59\x59\xA6" + struct.pack(">i", len(key_to_wrap)) + # pad the key to wrap if necessary + pad = (8 - (len(key_to_wrap) % 8)) % 8 + key_to_wrap = key_to_wrap + b"\x00" * pad + if len(key_to_wrap) == 8: + # RFC 5649 - 4.1 - exactly 8 octets after padding + encryptor = Cipher(AES(wrapping_key), ECB(), backend).encryptor() + b = encryptor.update(aiv + key_to_wrap) + assert encryptor.finalize() == b"" + return b + else: + r = [key_to_wrap[i:i + 8] for i in range(0, len(key_to_wrap), 8)] + return _wrap_core(wrapping_key, aiv, r, backend) + + +def aes_key_unwrap_with_padding(wrapping_key, wrapped_key, backend): + if len(wrapped_key) < 16: + raise InvalidUnwrap("Must be at least 16 bytes") + + if len(wrapping_key) not in [16, 24, 32]: + raise ValueError("The wrapping key must be a valid AES key length") + + if len(wrapped_key) == 16: + # RFC 5649 - 4.2 - exactly two 64-bit blocks + decryptor = Cipher(AES(wrapping_key), ECB(), backend).decryptor() + b = decryptor.update(wrapped_key) + assert decryptor.finalize() == b"" + a = b[:8] + data = b[8:] + n = 1 + else: + r = [wrapped_key[i:i + 8] for i in range(0, len(wrapped_key), 8)] + encrypted_aiv = r.pop(0) + n = len(r) + a, r 
= _unwrap_core(wrapping_key, encrypted_aiv, r, backend) + data = b"".join(r) + + # 1) Check that MSB(32,A) = A65959A6. + # 2) Check that 8*(n-1) < LSB(32,A) <= 8*n. If so, let + # MLI = LSB(32,A). + # 3) Let b = (8*n)-MLI, and then check that the rightmost b octets of + # the output data are zero. + (mli,) = struct.unpack(">I", a[4:]) + b = (8 * n) - mli + if ( + not bytes_eq(a[:4], b"\xa6\x59\x59\xa6") or not + 8 * (n - 1) < mli <= 8 * n or ( + b != 0 and not bytes_eq(data[-b:], b"\x00" * b) + ) + ): + raise InvalidUnwrap() + + if b == 0: + return data + else: + return data[:-b] + + +def aes_key_unwrap(wrapping_key, wrapped_key, backend): + if len(wrapped_key) < 24: + raise InvalidUnwrap("Must be at least 24 bytes") + + if len(wrapped_key) % 8 != 0: + raise InvalidUnwrap("The wrapped key must be a multiple of 8 bytes") + + if len(wrapping_key) not in [16, 24, 32]: + raise ValueError("The wrapping key must be a valid AES key length") + + aiv = b"\xa6\xa6\xa6\xa6\xa6\xa6\xa6\xa6" + r = [wrapped_key[i:i + 8] for i in range(0, len(wrapped_key), 8)] + a = r.pop(0) + a, r = _unwrap_core(wrapping_key, a, r, backend) + if not bytes_eq(a, aiv): + raise InvalidUnwrap() + + return b"".join(r) + + +class InvalidUnwrap(Exception): + pass diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/mac.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/mac.py new file mode 100644 index 0000000..4c95190 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/mac.py @@ -0,0 +1,37 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class MACContext(object): + @abc.abstractmethod + def update(self, data): + """ + Processes the provided bytes. + """ + + @abc.abstractmethod + def finalize(self): + """ + Returns the message authentication code as bytes. + """ + + @abc.abstractmethod + def copy(self): + """ + Return a MACContext that is a copy of the current context. + """ + + @abc.abstractmethod + def verify(self, signature): + """ + Checks if the generated message authentication code matches the + signature. + """ diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/padding.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/padding.py new file mode 100644 index 0000000..a081976 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/padding.py @@ -0,0 +1,202 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils +from cryptography.exceptions import AlreadyFinalized +from cryptography.hazmat.bindings._padding import lib + + +@six.add_metaclass(abc.ABCMeta) +class PaddingContext(object): + @abc.abstractmethod + def update(self, data): + """ + Pads the provided bytes and returns any available data as bytes. + """ + + @abc.abstractmethod + def finalize(self): + """ + Finalize the padding, returns bytes. 
+ """ + + +def _byte_padding_check(block_size): + if not (0 <= block_size <= 2040): + raise ValueError("block_size must be in range(0, 2041).") + + if block_size % 8 != 0: + raise ValueError("block_size must be a multiple of 8.") + + +def _byte_padding_update(buffer_, data, block_size): + if buffer_ is None: + raise AlreadyFinalized("Context was already finalized.") + + if not isinstance(data, bytes): + raise TypeError("data must be bytes.") + + buffer_ += data + + finished_blocks = len(buffer_) // (block_size // 8) + + result = buffer_[:finished_blocks * (block_size // 8)] + buffer_ = buffer_[finished_blocks * (block_size // 8):] + + return buffer_, result + + +def _byte_padding_pad(buffer_, block_size, paddingfn): + if buffer_ is None: + raise AlreadyFinalized("Context was already finalized.") + + pad_size = block_size // 8 - len(buffer_) + return buffer_ + paddingfn(pad_size) + + +def _byte_unpadding_update(buffer_, data, block_size): + if buffer_ is None: + raise AlreadyFinalized("Context was already finalized.") + + if not isinstance(data, bytes): + raise TypeError("data must be bytes.") + + buffer_ += data + + finished_blocks = max(len(buffer_) // (block_size // 8) - 1, 0) + + result = buffer_[:finished_blocks * (block_size // 8)] + buffer_ = buffer_[finished_blocks * (block_size // 8):] + + return buffer_, result + + +def _byte_unpadding_check(buffer_, block_size, checkfn): + if buffer_ is None: + raise AlreadyFinalized("Context was already finalized.") + + if len(buffer_) != block_size // 8: + raise ValueError("Invalid padding bytes.") + + valid = checkfn(buffer_, block_size // 8) + + if not valid: + raise ValueError("Invalid padding bytes.") + + pad_size = six.indexbytes(buffer_, -1) + return buffer_[:-pad_size] + + +class PKCS7(object): + def __init__(self, block_size): + _byte_padding_check(block_size) + self.block_size = block_size + + def padder(self): + return _PKCS7PaddingContext(self.block_size) + + def unpadder(self): + return _PKCS7UnpaddingContext(self.block_size) + + +@utils.register_interface(PaddingContext) +class _PKCS7PaddingContext(object): + def __init__(self, block_size): + self.block_size = block_size + # TODO: more copies than necessary, we should use zero-buffer (#193) + self._buffer = b"" + + def update(self, data): + self._buffer, result = _byte_padding_update( + self._buffer, data, self.block_size) + return result + + def _padding(self, size): + return six.int2byte(size) * size + + def finalize(self): + result = _byte_padding_pad( + self._buffer, self.block_size, self._padding) + self._buffer = None + return result + + +@utils.register_interface(PaddingContext) +class _PKCS7UnpaddingContext(object): + def __init__(self, block_size): + self.block_size = block_size + # TODO: more copies than necessary, we should use zero-buffer (#193) + self._buffer = b"" + + def update(self, data): + self._buffer, result = _byte_unpadding_update( + self._buffer, data, self.block_size) + return result + + def finalize(self): + result = _byte_unpadding_check( + self._buffer, self.block_size, + lib.Cryptography_check_pkcs7_padding) + self._buffer = None + return result + + +class ANSIX923(object): + def __init__(self, block_size): + _byte_padding_check(block_size) + self.block_size = block_size + + def padder(self): + return _ANSIX923PaddingContext(self.block_size) + + def unpadder(self): + return _ANSIX923UnpaddingContext(self.block_size) + + +@utils.register_interface(PaddingContext) +class _ANSIX923PaddingContext(object): + def __init__(self, block_size): + self.block_size = 
block_size + # TODO: more copies than necessary, we should use zero-buffer (#193) + self._buffer = b"" + + def update(self, data): + self._buffer, result = _byte_padding_update( + self._buffer, data, self.block_size) + return result + + def _padding(self, size): + return six.int2byte(0) * (size - 1) + six.int2byte(size) + + def finalize(self): + result = _byte_padding_pad( + self._buffer, self.block_size, self._padding) + self._buffer = None + return result + + +@utils.register_interface(PaddingContext) +class _ANSIX923UnpaddingContext(object): + def __init__(self, block_size): + self.block_size = block_size + # TODO: more copies than necessary, we should use zero-buffer (#193) + self._buffer = b"" + + def update(self, data): + self._buffer, result = _byte_unpadding_update( + self._buffer, data, self.block_size) + return result + + def finalize(self): + result = _byte_unpadding_check( + self._buffer, self.block_size, + lib.Cryptography_check_ansix923_padding) + self._buffer = None + return result diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/serialization.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/serialization.py new file mode 100644 index 0000000..bd09e6e --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/serialization.py @@ -0,0 +1,209 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +import base64 +import struct +from enum import Enum + +import six + +from cryptography import utils +from cryptography.exceptions import UnsupportedAlgorithm +from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa + + +def load_pem_private_key(data, password, backend): + return backend.load_pem_private_key(data, password) + + +def load_pem_public_key(data, backend): + return backend.load_pem_public_key(data) + + +def load_pem_parameters(data, backend): + return backend.load_pem_parameters(data) + + +def load_der_private_key(data, password, backend): + return backend.load_der_private_key(data, password) + + +def load_der_public_key(data, backend): + return backend.load_der_public_key(data) + + +def load_der_parameters(data, backend): + return backend.load_der_parameters(data) + + +def load_ssh_public_key(data, backend): + key_parts = data.split(b' ', 2) + + if len(key_parts) < 2: + raise ValueError( + 'Key is not in the proper format or contains extra data.') + + key_type = key_parts[0] + + if key_type == b'ssh-rsa': + loader = _load_ssh_rsa_public_key + elif key_type == b'ssh-dss': + loader = _load_ssh_dss_public_key + elif key_type in [ + b'ecdsa-sha2-nistp256', b'ecdsa-sha2-nistp384', b'ecdsa-sha2-nistp521', + ]: + loader = _load_ssh_ecdsa_public_key + else: + raise UnsupportedAlgorithm('Key type is not supported.') + + key_body = key_parts[1] + + try: + decoded_data = base64.b64decode(key_body) + except TypeError: + raise ValueError('Key is not in the proper format.') + + inner_key_type, rest = _ssh_read_next_string(decoded_data) + + if inner_key_type != key_type: + raise ValueError( + 'Key header and key body contain different key type values.' 
+ ) + + return loader(key_type, rest, backend) + + +def _load_ssh_rsa_public_key(key_type, decoded_data, backend): + e, rest = _ssh_read_next_mpint(decoded_data) + n, rest = _ssh_read_next_mpint(rest) + + if rest: + raise ValueError('Key body contains extra bytes.') + + return rsa.RSAPublicNumbers(e, n).public_key(backend) + + +def _load_ssh_dss_public_key(key_type, decoded_data, backend): + p, rest = _ssh_read_next_mpint(decoded_data) + q, rest = _ssh_read_next_mpint(rest) + g, rest = _ssh_read_next_mpint(rest) + y, rest = _ssh_read_next_mpint(rest) + + if rest: + raise ValueError('Key body contains extra bytes.') + + parameter_numbers = dsa.DSAParameterNumbers(p, q, g) + public_numbers = dsa.DSAPublicNumbers(y, parameter_numbers) + + return public_numbers.public_key(backend) + + +def _load_ssh_ecdsa_public_key(expected_key_type, decoded_data, backend): + curve_name, rest = _ssh_read_next_string(decoded_data) + data, rest = _ssh_read_next_string(rest) + + if expected_key_type != b"ecdsa-sha2-" + curve_name: + raise ValueError( + 'Key header and key body contain different key type values.' + ) + + if rest: + raise ValueError('Key body contains extra bytes.') + + curve = { + b"nistp256": ec.SECP256R1, + b"nistp384": ec.SECP384R1, + b"nistp521": ec.SECP521R1, + }[curve_name]() + + if six.indexbytes(data, 0) != 4: + raise NotImplementedError( + "Compressed elliptic curve points are not supported" + ) + + numbers = ec.EllipticCurvePublicNumbers.from_encoded_point(curve, data) + return numbers.public_key(backend) + + +def _ssh_read_next_string(data): + """ + Retrieves the next RFC 4251 string value from the data. + + While the RFC calls these strings, in Python they are bytes objects. + """ + if len(data) < 4: + raise ValueError("Key is not in the proper format") + + str_len, = struct.unpack('>I', data[:4]) + if len(data) < str_len + 4: + raise ValueError("Key is not in the proper format") + + return data[4:4 + str_len], data[4 + str_len:] + + +def _ssh_read_next_mpint(data): + """ + Reads the next mpint from the data. + + Currently, all mpints are interpreted as unsigned. 
+ """ + mpint_data, rest = _ssh_read_next_string(data) + + return ( + utils.int_from_bytes(mpint_data, byteorder='big', signed=False), rest + ) + + +def _ssh_write_string(data): + return struct.pack(">I", len(data)) + data + + +def _ssh_write_mpint(value): + data = utils.int_to_bytes(value) + if six.indexbytes(data, 0) & 0x80: + data = b"\x00" + data + return _ssh_write_string(data) + + +class Encoding(Enum): + PEM = "PEM" + DER = "DER" + OpenSSH = "OpenSSH" + + +class PrivateFormat(Enum): + PKCS8 = "PKCS8" + TraditionalOpenSSL = "TraditionalOpenSSL" + + +class PublicFormat(Enum): + SubjectPublicKeyInfo = "X.509 subjectPublicKeyInfo with PKCS#1" + PKCS1 = "Raw PKCS#1" + OpenSSH = "OpenSSH" + + +class ParameterFormat(Enum): + PKCS3 = "PKCS3" + + +@six.add_metaclass(abc.ABCMeta) +class KeySerializationEncryption(object): + pass + + +@utils.register_interface(KeySerializationEncryption) +class BestAvailableEncryption(object): + def __init__(self, password): + if not isinstance(password, bytes) or len(password) == 0: + raise ValueError("Password must be 1 or more bytes.") + + self.password = password + + +@utils.register_interface(KeySerializationEncryption) +class NoEncryption(object): + pass diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/__init__.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/__init__.py new file mode 100644 index 0000000..e71f9e6 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/__init__.py @@ -0,0 +1,9 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + + +class InvalidToken(Exception): + pass diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/hotp.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/hotp.py new file mode 100644 index 0000000..4ad1bdc --- /dev/null +++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/hotp.py @@ -0,0 +1,68 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/__init__.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/__init__.py
new file mode 100644
index 0000000..e71f9e6
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/__init__.py
@@ -0,0 +1,9 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+
+class InvalidToken(Exception):
+    pass
diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/hotp.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/hotp.py
new file mode 100644
index 0000000..4ad1bdc
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/hotp.py
@@ -0,0 +1,68 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import struct
+
+import six
+
+from cryptography.exceptions import (
+    UnsupportedAlgorithm, _Reasons
+)
+from cryptography.hazmat.backends.interfaces import HMACBackend
+from cryptography.hazmat.primitives import constant_time, hmac
+from cryptography.hazmat.primitives.hashes import SHA1, SHA256, SHA512
+from cryptography.hazmat.primitives.twofactor import InvalidToken
+from cryptography.hazmat.primitives.twofactor.utils import _generate_uri
+
+
+class HOTP(object):
+    def __init__(self, key, length, algorithm, backend,
+                 enforce_key_length=True):
+        if not isinstance(backend, HMACBackend):
+            raise UnsupportedAlgorithm(
+                "Backend object does not implement HMACBackend.",
+                _Reasons.BACKEND_MISSING_INTERFACE
+            )
+
+        if len(key) < 16 and enforce_key_length is True:
+            raise ValueError("Key length has to be at least 128 bits.")
+
+        if not isinstance(length, six.integer_types):
+            raise TypeError("Length parameter must be an integer type.")
+
+        if length < 6 or length > 8:
+            raise ValueError("Length of HOTP has to be between 6 and 8.")
+
+        if not isinstance(algorithm, (SHA1, SHA256, SHA512)):
+            raise TypeError("Algorithm must be SHA1, SHA256 or SHA512.")
+
+        self._key = key
+        self._length = length
+        self._algorithm = algorithm
+        self._backend = backend
+
+    def generate(self, counter):
+        truncated_value = self._dynamic_truncate(counter)
+        hotp = truncated_value % (10 ** self._length)
+        return "{0:0{1}}".format(hotp, self._length).encode()
+
+    def verify(self, hotp, counter):
+        if not constant_time.bytes_eq(self.generate(counter), hotp):
+            raise InvalidToken("Supplied HOTP value does not match.")
+
+    def _dynamic_truncate(self, counter):
+        ctx = hmac.HMAC(self._key, self._algorithm, self._backend)
+        ctx.update(struct.pack(">Q", counter))
+        hmac_value = ctx.finalize()
+
+        offset = six.indexbytes(hmac_value, len(hmac_value) - 1) & 0b1111
+        p = hmac_value[offset:offset + 4]
+        return struct.unpack(">I", p)[0] & 0x7fffffff
+
+    def get_provisioning_uri(self, account_name, counter, issuer):
+        return _generate_uri(self, "hotp", account_name, issuer, [
+            ("counter", int(counter)),
+        ])
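A minimal counter-based sketch of the HOTP class above (the random key is illustrative; RFC 4226 requires a shared secret of at least 128 bits, which enforce_key_length checks):

import os

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.hashes import SHA1
from cryptography.hazmat.primitives.twofactor.hotp import HOTP

key = os.urandom(20)  # shared secret, stored by both parties
hotp = HOTP(key, 6, SHA1(), default_backend())

token = hotp.generate(0)  # six ASCII digits for counter value 0
hotp.verify(token, 0)     # raises InvalidToken on mismatch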
diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/totp.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/totp.py
new file mode 100644
index 0000000..499f282
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/totp.py
@@ -0,0 +1,40 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+from cryptography.exceptions import (
+    UnsupportedAlgorithm, _Reasons
+)
+from cryptography.hazmat.backends.interfaces import HMACBackend
+from cryptography.hazmat.primitives import constant_time
+from cryptography.hazmat.primitives.twofactor import InvalidToken
+from cryptography.hazmat.primitives.twofactor.hotp import HOTP
+from cryptography.hazmat.primitives.twofactor.utils import _generate_uri
+
+
+class TOTP(object):
+    def __init__(self, key, length, algorithm, time_step, backend,
+                 enforce_key_length=True):
+        if not isinstance(backend, HMACBackend):
+            raise UnsupportedAlgorithm(
+                "Backend object does not implement HMACBackend.",
+                _Reasons.BACKEND_MISSING_INTERFACE
+            )
+
+        self._time_step = time_step
+        self._hotp = HOTP(key, length, algorithm, backend, enforce_key_length)
+
+    def generate(self, time):
+        counter = int(time / self._time_step)
+        return self._hotp.generate(counter)
+
+    def verify(self, totp, time):
+        if not constant_time.bytes_eq(self.generate(time), totp):
+            raise InvalidToken("Supplied TOTP value does not match.")
+
+    def get_provisioning_uri(self, account_name, issuer):
+        return _generate_uri(self._hotp, "totp", account_name, issuer, [
+            ("period", int(self._time_step)),
+        ])
diff --git a/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/utils.py b/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/utils.py
new file mode 100644
index 0000000..0ed8c4c
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/hazmat/primitives/twofactor/utils.py
@@ -0,0 +1,30 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import base64
+
+from six.moves.urllib.parse import quote, urlencode
+
+
+def _generate_uri(hotp, type_name, account_name, issuer, extra_parameters):
+    parameters = [
+        ("digits", hotp._length),
+        ("secret", base64.b32encode(hotp._key)),
+        ("algorithm", hotp._algorithm.name.upper()),
+    ]
+
+    if issuer is not None:
+        parameters.append(("issuer", issuer))
+
+    parameters.extend(extra_parameters)
+
+    uriparts = {
+        "type": type_name,
+        "label": ("%s:%s" % (quote(issuer), quote(account_name)) if issuer
+                  else quote(account_name)),
+        "parameters": urlencode(parameters),
+    }
+    return "otpauth://{type}/{label}?{parameters}".format(**uriparts)
diff --git a/server/www/packages/packages-common/cryptography/utils.py b/server/www/packages/packages-common/cryptography/utils.py
new file mode 100644
index 0000000..3d45a77
--- /dev/null
+++ b/server/www/packages/packages-common/cryptography/utils.py
@@ -0,0 +1,165 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import abc
+import binascii
+import inspect
+import sys
+import warnings
+
+
+# We use a UserWarning subclass, instead of DeprecationWarning, because CPython
+# decided deprecation warnings should be invisible by default.
+class CryptographyDeprecationWarning(UserWarning):
+    pass
+
+
+# Several APIs were deprecated with no specific end-of-life date because of the
+# ubiquity of their use. They should not be removed until we agree on when that
+# cycle ends.
+PersistentlyDeprecated = CryptographyDeprecationWarning +DeprecatedIn21 = CryptographyDeprecationWarning +DeprecatedIn23 = CryptographyDeprecationWarning + + +def _check_bytes(name, value): + if not isinstance(value, bytes): + raise TypeError("{0} must be bytes".format(name)) + + +def read_only_property(name): + return property(lambda self: getattr(self, name)) + + +def register_interface(iface): + def register_decorator(klass): + verify_interface(iface, klass) + iface.register(klass) + return klass + return register_decorator + + +def register_interface_if(predicate, iface): + def register_decorator(klass): + if predicate: + verify_interface(iface, klass) + iface.register(klass) + return klass + return register_decorator + + +if hasattr(int, "from_bytes"): + int_from_bytes = int.from_bytes +else: + def int_from_bytes(data, byteorder, signed=False): + assert byteorder == 'big' + assert not signed + + return int(binascii.hexlify(data), 16) + + +if hasattr(int, "to_bytes"): + def int_to_bytes(integer, length=None): + return integer.to_bytes( + length or (integer.bit_length() + 7) // 8 or 1, 'big' + ) +else: + def int_to_bytes(integer, length=None): + hex_string = '%x' % integer + if length is None: + n = len(hex_string) + else: + n = length * 2 + return binascii.unhexlify(hex_string.zfill(n + (n & 1))) + + +class InterfaceNotImplemented(Exception): + pass + + +if hasattr(inspect, "signature"): + signature = inspect.signature +else: + signature = inspect.getargspec + + +def verify_interface(iface, klass): + for method in iface.__abstractmethods__: + if not hasattr(klass, method): + raise InterfaceNotImplemented( + "{0} is missing a {1!r} method".format(klass, method) + ) + if isinstance(getattr(iface, method), abc.abstractproperty): + # Can't properly verify these yet. + continue + sig = signature(getattr(iface, method)) + actual = signature(getattr(klass, method)) + if sig != actual: + raise InterfaceNotImplemented( + "{0}.{1}'s signature differs from the expected. Expected: " + "{2!r}. Received: {3!r}".format( + klass, method, sig, actual + ) + ) + + +# No longer needed as of 2.2, but retained because we have external consumers +# who use it. 
+def bit_length(x): + return x.bit_length() + + +class _DeprecatedValue(object): + def __init__(self, value, message, warning_class): + self.value = value + self.message = message + self.warning_class = warning_class + + +class _ModuleWithDeprecations(object): + def __init__(self, module): + self.__dict__["_module"] = module + + def __getattr__(self, attr): + obj = getattr(self._module, attr) + if isinstance(obj, _DeprecatedValue): + warnings.warn(obj.message, obj.warning_class, stacklevel=2) + obj = obj.value + return obj + + def __setattr__(self, attr, value): + setattr(self._module, attr, value) + + def __delattr__(self, attr): + obj = getattr(self._module, attr) + if isinstance(obj, _DeprecatedValue): + warnings.warn(obj.message, obj.warning_class, stacklevel=2) + + delattr(self._module, attr) + + def __dir__(self): + return ["_module"] + dir(self._module) + + +def deprecated(value, module_name, message, warning_class): + module = sys.modules[module_name] + if not isinstance(module, _ModuleWithDeprecations): + sys.modules[module_name] = _ModuleWithDeprecations(module) + return _DeprecatedValue(value, message, warning_class) + + +def cached_property(func): + cached_name = "_cached_{0}".format(func) + sentinel = object() + + def inner(instance): + cache = getattr(instance, cached_name, sentinel) + if cache is not sentinel: + return cache + result = func(instance) + setattr(instance, cached_name, result) + return result + return property(inner) diff --git a/server/www/packages/packages-common/cryptography/x509/__init__.py b/server/www/packages/packages-common/cryptography/x509/__init__.py new file mode 100644 index 0000000..d2f9b04 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/x509/__init__.py @@ -0,0 +1,185 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
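For reference, the deprecated()/_ModuleWithDeprecations machinery at the end of cryptography/utils.py above works by swapping a module's sys.modules entry for a warning proxy. A hypothetical module deprecating one of its own attributes might read as follows (sketch only; OLD_LIMIT and the message are invented names):

    # somemodule.py (hypothetical consumer of cryptography.utils)
    from cryptography import utils

    OLD_LIMIT = utils.deprecated(
        42,
        __name__,
        "OLD_LIMIT is deprecated; use NEW_LIMIT instead.",
        utils.DeprecatedIn23,
    )
    # sys.modules[__name__] is now a _ModuleWithDeprecations proxy:
    # reading somemodule.OLD_LIMIT emits the warning and returns the plain 42.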
+ +from __future__ import absolute_import, division, print_function + +from cryptography.x509 import certificate_transparency +from cryptography.x509.base import ( + Certificate, CertificateBuilder, CertificateRevocationList, + CertificateRevocationListBuilder, + CertificateSigningRequest, CertificateSigningRequestBuilder, + InvalidVersion, RevokedCertificate, RevokedCertificateBuilder, + Version, load_der_x509_certificate, load_der_x509_crl, load_der_x509_csr, + load_pem_x509_certificate, load_pem_x509_crl, load_pem_x509_csr, + random_serial_number, +) +from cryptography.x509.extensions import ( + AccessDescription, AuthorityInformationAccess, + AuthorityKeyIdentifier, BasicConstraints, CRLDistributionPoints, + CRLNumber, CRLReason, CertificateIssuer, CertificatePolicies, + DeltaCRLIndicator, DistributionPoint, DuplicateExtension, ExtendedKeyUsage, + Extension, ExtensionNotFound, ExtensionType, Extensions, FreshestCRL, + GeneralNames, InhibitAnyPolicy, InvalidityDate, IssuerAlternativeName, + KeyUsage, NameConstraints, NoticeReference, OCSPNoCheck, PolicyConstraints, + PolicyInformation, PrecertificateSignedCertificateTimestamps, ReasonFlags, + SubjectAlternativeName, SubjectKeyIdentifier, TLSFeature, TLSFeatureType, + UnrecognizedExtension, UserNotice +) +from cryptography.x509.general_name import ( + DNSName, DirectoryName, GeneralName, IPAddress, OtherName, RFC822Name, + RegisteredID, UniformResourceIdentifier, UnsupportedGeneralNameType, + _GENERAL_NAMES +) +from cryptography.x509.name import ( + Name, NameAttribute, RelativeDistinguishedName +) +from cryptography.x509.oid import ( + AuthorityInformationAccessOID, CRLEntryExtensionOID, + CertificatePoliciesOID, ExtendedKeyUsageOID, ExtensionOID, NameOID, + ObjectIdentifier, SignatureAlgorithmOID, _SIG_OIDS_TO_HASH +) + + +OID_AUTHORITY_INFORMATION_ACCESS = ExtensionOID.AUTHORITY_INFORMATION_ACCESS +OID_AUTHORITY_KEY_IDENTIFIER = ExtensionOID.AUTHORITY_KEY_IDENTIFIER +OID_BASIC_CONSTRAINTS = ExtensionOID.BASIC_CONSTRAINTS +OID_CERTIFICATE_POLICIES = ExtensionOID.CERTIFICATE_POLICIES +OID_CRL_DISTRIBUTION_POINTS = ExtensionOID.CRL_DISTRIBUTION_POINTS +OID_EXTENDED_KEY_USAGE = ExtensionOID.EXTENDED_KEY_USAGE +OID_FRESHEST_CRL = ExtensionOID.FRESHEST_CRL +OID_INHIBIT_ANY_POLICY = ExtensionOID.INHIBIT_ANY_POLICY +OID_ISSUER_ALTERNATIVE_NAME = ExtensionOID.ISSUER_ALTERNATIVE_NAME +OID_KEY_USAGE = ExtensionOID.KEY_USAGE +OID_NAME_CONSTRAINTS = ExtensionOID.NAME_CONSTRAINTS +OID_OCSP_NO_CHECK = ExtensionOID.OCSP_NO_CHECK +OID_POLICY_CONSTRAINTS = ExtensionOID.POLICY_CONSTRAINTS +OID_POLICY_MAPPINGS = ExtensionOID.POLICY_MAPPINGS +OID_SUBJECT_ALTERNATIVE_NAME = ExtensionOID.SUBJECT_ALTERNATIVE_NAME +OID_SUBJECT_DIRECTORY_ATTRIBUTES = ExtensionOID.SUBJECT_DIRECTORY_ATTRIBUTES +OID_SUBJECT_INFORMATION_ACCESS = ExtensionOID.SUBJECT_INFORMATION_ACCESS +OID_SUBJECT_KEY_IDENTIFIER = ExtensionOID.SUBJECT_KEY_IDENTIFIER + +OID_DSA_WITH_SHA1 = SignatureAlgorithmOID.DSA_WITH_SHA1 +OID_DSA_WITH_SHA224 = SignatureAlgorithmOID.DSA_WITH_SHA224 +OID_DSA_WITH_SHA256 = SignatureAlgorithmOID.DSA_WITH_SHA256 +OID_ECDSA_WITH_SHA1 = SignatureAlgorithmOID.ECDSA_WITH_SHA1 +OID_ECDSA_WITH_SHA224 = SignatureAlgorithmOID.ECDSA_WITH_SHA224 +OID_ECDSA_WITH_SHA256 = SignatureAlgorithmOID.ECDSA_WITH_SHA256 +OID_ECDSA_WITH_SHA384 = SignatureAlgorithmOID.ECDSA_WITH_SHA384 +OID_ECDSA_WITH_SHA512 = SignatureAlgorithmOID.ECDSA_WITH_SHA512 +OID_RSA_WITH_MD5 = SignatureAlgorithmOID.RSA_WITH_MD5 +OID_RSA_WITH_SHA1 = SignatureAlgorithmOID.RSA_WITH_SHA1 +OID_RSA_WITH_SHA224 = 
SignatureAlgorithmOID.RSA_WITH_SHA224 +OID_RSA_WITH_SHA256 = SignatureAlgorithmOID.RSA_WITH_SHA256 +OID_RSA_WITH_SHA384 = SignatureAlgorithmOID.RSA_WITH_SHA384 +OID_RSA_WITH_SHA512 = SignatureAlgorithmOID.RSA_WITH_SHA512 +OID_RSASSA_PSS = SignatureAlgorithmOID.RSASSA_PSS + +OID_COMMON_NAME = NameOID.COMMON_NAME +OID_COUNTRY_NAME = NameOID.COUNTRY_NAME +OID_DOMAIN_COMPONENT = NameOID.DOMAIN_COMPONENT +OID_DN_QUALIFIER = NameOID.DN_QUALIFIER +OID_EMAIL_ADDRESS = NameOID.EMAIL_ADDRESS +OID_GENERATION_QUALIFIER = NameOID.GENERATION_QUALIFIER +OID_GIVEN_NAME = NameOID.GIVEN_NAME +OID_LOCALITY_NAME = NameOID.LOCALITY_NAME +OID_ORGANIZATIONAL_UNIT_NAME = NameOID.ORGANIZATIONAL_UNIT_NAME +OID_ORGANIZATION_NAME = NameOID.ORGANIZATION_NAME +OID_PSEUDONYM = NameOID.PSEUDONYM +OID_SERIAL_NUMBER = NameOID.SERIAL_NUMBER +OID_STATE_OR_PROVINCE_NAME = NameOID.STATE_OR_PROVINCE_NAME +OID_SURNAME = NameOID.SURNAME +OID_TITLE = NameOID.TITLE + +OID_CLIENT_AUTH = ExtendedKeyUsageOID.CLIENT_AUTH +OID_CODE_SIGNING = ExtendedKeyUsageOID.CODE_SIGNING +OID_EMAIL_PROTECTION = ExtendedKeyUsageOID.EMAIL_PROTECTION +OID_OCSP_SIGNING = ExtendedKeyUsageOID.OCSP_SIGNING +OID_SERVER_AUTH = ExtendedKeyUsageOID.SERVER_AUTH +OID_TIME_STAMPING = ExtendedKeyUsageOID.TIME_STAMPING + +OID_ANY_POLICY = CertificatePoliciesOID.ANY_POLICY +OID_CPS_QUALIFIER = CertificatePoliciesOID.CPS_QUALIFIER +OID_CPS_USER_NOTICE = CertificatePoliciesOID.CPS_USER_NOTICE + +OID_CERTIFICATE_ISSUER = CRLEntryExtensionOID.CERTIFICATE_ISSUER +OID_CRL_REASON = CRLEntryExtensionOID.CRL_REASON +OID_INVALIDITY_DATE = CRLEntryExtensionOID.INVALIDITY_DATE + +OID_CA_ISSUERS = AuthorityInformationAccessOID.CA_ISSUERS +OID_OCSP = AuthorityInformationAccessOID.OCSP + +__all__ = [ + "certificate_transparency", + "load_pem_x509_certificate", + "load_der_x509_certificate", + "load_pem_x509_csr", + "load_der_x509_csr", + "load_pem_x509_crl", + "load_der_x509_crl", + "random_serial_number", + "InvalidVersion", + "DeltaCRLIndicator", + "DuplicateExtension", + "ExtensionNotFound", + "UnsupportedGeneralNameType", + "NameAttribute", + "Name", + "RelativeDistinguishedName", + "ObjectIdentifier", + "ExtensionType", + "Extensions", + "Extension", + "ExtendedKeyUsage", + "FreshestCRL", + "TLSFeature", + "TLSFeatureType", + "OCSPNoCheck", + "BasicConstraints", + "CRLNumber", + "KeyUsage", + "AuthorityInformationAccess", + "AccessDescription", + "CertificatePolicies", + "PolicyInformation", + "UserNotice", + "NoticeReference", + "SubjectKeyIdentifier", + "NameConstraints", + "CRLDistributionPoints", + "DistributionPoint", + "ReasonFlags", + "InhibitAnyPolicy", + "SubjectAlternativeName", + "IssuerAlternativeName", + "AuthorityKeyIdentifier", + "GeneralNames", + "GeneralName", + "RFC822Name", + "DNSName", + "UniformResourceIdentifier", + "RegisteredID", + "DirectoryName", + "IPAddress", + "OtherName", + "Certificate", + "CertificateRevocationList", + "CertificateRevocationListBuilder", + "CertificateSigningRequest", + "RevokedCertificate", + "RevokedCertificateBuilder", + "CertificateSigningRequestBuilder", + "CertificateBuilder", + "Version", + "_SIG_OIDS_TO_HASH", + "OID_CA_ISSUERS", + "OID_OCSP", + "_GENERAL_NAMES", + "CertificateIssuer", + "CRLReason", + "InvalidityDate", + "UnrecognizedExtension", + "PolicyConstraints", + "PrecertificateSignedCertificateTimestamps", +] diff --git a/server/www/packages/packages-common/cryptography/x509/base.py b/server/www/packages/packages-common/cryptography/x509/base.py new file mode 100644 index 0000000..b14499c --- /dev/null +++ 
b/server/www/packages/packages-common/cryptography/x509/base.py @@ -0,0 +1,743 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +import datetime +import os +from enum import Enum + +import six + +from cryptography import utils +from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa +from cryptography.x509.extensions import Extension, ExtensionType +from cryptography.x509.name import Name + + +_UNIX_EPOCH = datetime.datetime(1970, 1, 1) + + +def _convert_to_naive_utc_time(time): + """Normalizes a datetime to a naive datetime in UTC. + + time -- datetime to normalize. Assumed to be in UTC if not timezone + aware. + """ + if time.tzinfo is not None: + offset = time.utcoffset() + offset = offset if offset else datetime.timedelta() + return time.replace(tzinfo=None) - offset + else: + return time + + +class Version(Enum): + v1 = 0 + v3 = 2 + + +def load_pem_x509_certificate(data, backend): + return backend.load_pem_x509_certificate(data) + + +def load_der_x509_certificate(data, backend): + return backend.load_der_x509_certificate(data) + + +def load_pem_x509_csr(data, backend): + return backend.load_pem_x509_csr(data) + + +def load_der_x509_csr(data, backend): + return backend.load_der_x509_csr(data) + + +def load_pem_x509_crl(data, backend): + return backend.load_pem_x509_crl(data) + + +def load_der_x509_crl(data, backend): + return backend.load_der_x509_crl(data) + + +class InvalidVersion(Exception): + def __init__(self, msg, parsed_version): + super(InvalidVersion, self).__init__(msg) + self.parsed_version = parsed_version + + +@six.add_metaclass(abc.ABCMeta) +class Certificate(object): + @abc.abstractmethod + def fingerprint(self, algorithm): + """ + Returns bytes using digest passed. + """ + + @abc.abstractproperty + def serial_number(self): + """ + Returns certificate serial number + """ + + @abc.abstractproperty + def version(self): + """ + Returns the certificate version + """ + + @abc.abstractmethod + def public_key(self): + """ + Returns the public key + """ + + @abc.abstractproperty + def not_valid_before(self): + """ + Not before time (represented as UTC datetime) + """ + + @abc.abstractproperty + def not_valid_after(self): + """ + Not after time (represented as UTC datetime) + """ + + @abc.abstractproperty + def issuer(self): + """ + Returns the issuer name object. + """ + + @abc.abstractproperty + def subject(self): + """ + Returns the subject name object. + """ + + @abc.abstractproperty + def signature_hash_algorithm(self): + """ + Returns a HashAlgorithm corresponding to the type of the digest signed + in the certificate. + """ + + @abc.abstractproperty + def signature_algorithm_oid(self): + """ + Returns the ObjectIdentifier of the signature algorithm. + """ + + @abc.abstractproperty + def extensions(self): + """ + Returns an Extensions object. + """ + + @abc.abstractproperty + def signature(self): + """ + Returns the signature bytes. + """ + + @abc.abstractproperty + def tbs_certificate_bytes(self): + """ + Returns the tbsCertificate payload bytes as defined in RFC 5280. + """ + + @abc.abstractmethod + def __eq__(self, other): + """ + Checks equality. + """ + + @abc.abstractmethod + def __ne__(self, other): + """ + Checks not equal. + """ + + @abc.abstractmethod + def __hash__(self): + """ + Computes a hash. 
+ """ + + @abc.abstractmethod + def public_bytes(self, encoding): + """ + Serializes the certificate to PEM or DER format. + """ + + +@six.add_metaclass(abc.ABCMeta) +class CertificateRevocationList(object): + @abc.abstractmethod + def public_bytes(self, encoding): + """ + Serializes the CRL to PEM or DER format. + """ + + @abc.abstractmethod + def fingerprint(self, algorithm): + """ + Returns bytes using digest passed. + """ + + @abc.abstractmethod + def get_revoked_certificate_by_serial_number(self, serial_number): + """ + Returns an instance of RevokedCertificate or None if the serial_number + is not in the CRL. + """ + + @abc.abstractproperty + def signature_hash_algorithm(self): + """ + Returns a HashAlgorithm corresponding to the type of the digest signed + in the certificate. + """ + + @abc.abstractproperty + def signature_algorithm_oid(self): + """ + Returns the ObjectIdentifier of the signature algorithm. + """ + + @abc.abstractproperty + def issuer(self): + """ + Returns the X509Name with the issuer of this CRL. + """ + + @abc.abstractproperty + def next_update(self): + """ + Returns the date of next update for this CRL. + """ + + @abc.abstractproperty + def last_update(self): + """ + Returns the date of last update for this CRL. + """ + + @abc.abstractproperty + def extensions(self): + """ + Returns an Extensions object containing a list of CRL extensions. + """ + + @abc.abstractproperty + def signature(self): + """ + Returns the signature bytes. + """ + + @abc.abstractproperty + def tbs_certlist_bytes(self): + """ + Returns the tbsCertList payload bytes as defined in RFC 5280. + """ + + @abc.abstractmethod + def __eq__(self, other): + """ + Checks equality. + """ + + @abc.abstractmethod + def __ne__(self, other): + """ + Checks not equal. + """ + + @abc.abstractmethod + def is_signature_valid(self, public_key): + """ + Verifies signature of revocation list against given public key. + """ + + +@six.add_metaclass(abc.ABCMeta) +class CertificateSigningRequest(object): + @abc.abstractmethod + def __eq__(self, other): + """ + Checks equality. + """ + + @abc.abstractmethod + def __ne__(self, other): + """ + Checks not equal. + """ + + @abc.abstractmethod + def __hash__(self): + """ + Computes a hash. + """ + + @abc.abstractmethod + def public_key(self): + """ + Returns the public key + """ + + @abc.abstractproperty + def subject(self): + """ + Returns the subject name object. + """ + + @abc.abstractproperty + def signature_hash_algorithm(self): + """ + Returns a HashAlgorithm corresponding to the type of the digest signed + in the certificate. + """ + + @abc.abstractproperty + def signature_algorithm_oid(self): + """ + Returns the ObjectIdentifier of the signature algorithm. + """ + + @abc.abstractproperty + def extensions(self): + """ + Returns the extensions in the signing request. + """ + + @abc.abstractmethod + def public_bytes(self, encoding): + """ + Encodes the request to PEM or DER format. + """ + + @abc.abstractproperty + def signature(self): + """ + Returns the signature bytes. + """ + + @abc.abstractproperty + def tbs_certrequest_bytes(self): + """ + Returns the PKCS#10 CertificationRequestInfo bytes as defined in RFC + 2986. + """ + + @abc.abstractproperty + def is_signature_valid(self): + """ + Verifies signature of signing request. + """ + + +@six.add_metaclass(abc.ABCMeta) +class RevokedCertificate(object): + @abc.abstractproperty + def serial_number(self): + """ + Returns the serial number of the revoked certificate. 
+ """ + + @abc.abstractproperty + def revocation_date(self): + """ + Returns the date of when this certificate was revoked. + """ + + @abc.abstractproperty + def extensions(self): + """ + Returns an Extensions object containing a list of Revoked extensions. + """ + + +class CertificateSigningRequestBuilder(object): + def __init__(self, subject_name=None, extensions=[]): + """ + Creates an empty X.509 certificate request (v1). + """ + self._subject_name = subject_name + self._extensions = extensions + + def subject_name(self, name): + """ + Sets the certificate requestor's distinguished name. + """ + if not isinstance(name, Name): + raise TypeError('Expecting x509.Name object.') + if self._subject_name is not None: + raise ValueError('The subject name may only be set once.') + return CertificateSigningRequestBuilder(name, self._extensions) + + def add_extension(self, extension, critical): + """ + Adds an X.509 extension to the certificate request. + """ + if not isinstance(extension, ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = Extension(extension.oid, critical, extension) + + # TODO: This is quadratic in the number of extensions + for e in self._extensions: + if e.oid == extension.oid: + raise ValueError('This extension has already been set.') + return CertificateSigningRequestBuilder( + self._subject_name, self._extensions + [extension] + ) + + def sign(self, private_key, algorithm, backend): + """ + Signs the request using the requestor's private key. + """ + if self._subject_name is None: + raise ValueError("A CertificateSigningRequest must have a subject") + return backend.create_x509_csr(self, private_key, algorithm) + + +class CertificateBuilder(object): + def __init__(self, issuer_name=None, subject_name=None, + public_key=None, serial_number=None, not_valid_before=None, + not_valid_after=None, extensions=[]): + self._version = Version.v3 + self._issuer_name = issuer_name + self._subject_name = subject_name + self._public_key = public_key + self._serial_number = serial_number + self._not_valid_before = not_valid_before + self._not_valid_after = not_valid_after + self._extensions = extensions + + def issuer_name(self, name): + """ + Sets the CA's distinguished name. + """ + if not isinstance(name, Name): + raise TypeError('Expecting x509.Name object.') + if self._issuer_name is not None: + raise ValueError('The issuer name may only be set once.') + return CertificateBuilder( + name, self._subject_name, self._public_key, + self._serial_number, self._not_valid_before, + self._not_valid_after, self._extensions + ) + + def subject_name(self, name): + """ + Sets the requestor's distinguished name. + """ + if not isinstance(name, Name): + raise TypeError('Expecting x509.Name object.') + if self._subject_name is not None: + raise ValueError('The subject name may only be set once.') + return CertificateBuilder( + self._issuer_name, name, self._public_key, + self._serial_number, self._not_valid_before, + self._not_valid_after, self._extensions + ) + + def public_key(self, key): + """ + Sets the requestor's public key (as found in the signing request). 
+ """ + if not isinstance(key, (dsa.DSAPublicKey, rsa.RSAPublicKey, + ec.EllipticCurvePublicKey)): + raise TypeError('Expecting one of DSAPublicKey, RSAPublicKey,' + ' or EllipticCurvePublicKey.') + if self._public_key is not None: + raise ValueError('The public key may only be set once.') + return CertificateBuilder( + self._issuer_name, self._subject_name, key, + self._serial_number, self._not_valid_before, + self._not_valid_after, self._extensions + ) + + def serial_number(self, number): + """ + Sets the certificate serial number. + """ + if not isinstance(number, six.integer_types): + raise TypeError('Serial number must be of integral type.') + if self._serial_number is not None: + raise ValueError('The serial number may only be set once.') + if number <= 0: + raise ValueError('The serial number should be positive.') + + # ASN.1 integers are always signed, so most significant bit must be + # zero. + if number.bit_length() >= 160: # As defined in RFC 5280 + raise ValueError('The serial number should not be more than 159 ' + 'bits.') + return CertificateBuilder( + self._issuer_name, self._subject_name, + self._public_key, number, self._not_valid_before, + self._not_valid_after, self._extensions + ) + + def not_valid_before(self, time): + """ + Sets the certificate activation time. + """ + if not isinstance(time, datetime.datetime): + raise TypeError('Expecting datetime object.') + if self._not_valid_before is not None: + raise ValueError('The not valid before may only be set once.') + time = _convert_to_naive_utc_time(time) + if time <= _UNIX_EPOCH: + raise ValueError('The not valid before date must be after the unix' + ' epoch (1970 January 1).') + if self._not_valid_after is not None and time > self._not_valid_after: + raise ValueError( + 'The not valid before date must be before the not valid after ' + 'date.' + ) + return CertificateBuilder( + self._issuer_name, self._subject_name, + self._public_key, self._serial_number, time, + self._not_valid_after, self._extensions + ) + + def not_valid_after(self, time): + """ + Sets the certificate expiration time. + """ + if not isinstance(time, datetime.datetime): + raise TypeError('Expecting datetime object.') + if self._not_valid_after is not None: + raise ValueError('The not valid after may only be set once.') + time = _convert_to_naive_utc_time(time) + if time <= _UNIX_EPOCH: + raise ValueError('The not valid after date must be after the unix' + ' epoch (1970 January 1).') + if (self._not_valid_before is not None and + time < self._not_valid_before): + raise ValueError( + 'The not valid after date must be after the not valid before ' + 'date.' + ) + return CertificateBuilder( + self._issuer_name, self._subject_name, + self._public_key, self._serial_number, self._not_valid_before, + time, self._extensions + ) + + def add_extension(self, extension, critical): + """ + Adds an X.509 extension to the certificate. 
+ """ + if not isinstance(extension, ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = Extension(extension.oid, critical, extension) + + # TODO: This is quadratic in the number of extensions + for e in self._extensions: + if e.oid == extension.oid: + raise ValueError('This extension has already been set.') + + return CertificateBuilder( + self._issuer_name, self._subject_name, + self._public_key, self._serial_number, self._not_valid_before, + self._not_valid_after, self._extensions + [extension] + ) + + def sign(self, private_key, algorithm, backend): + """ + Signs the certificate using the CA's private key. + """ + if self._subject_name is None: + raise ValueError("A certificate must have a subject name") + + if self._issuer_name is None: + raise ValueError("A certificate must have an issuer name") + + if self._serial_number is None: + raise ValueError("A certificate must have a serial number") + + if self._not_valid_before is None: + raise ValueError("A certificate must have a not valid before time") + + if self._not_valid_after is None: + raise ValueError("A certificate must have a not valid after time") + + if self._public_key is None: + raise ValueError("A certificate must have a public key") + + return backend.create_x509_certificate(self, private_key, algorithm) + + +class CertificateRevocationListBuilder(object): + def __init__(self, issuer_name=None, last_update=None, next_update=None, + extensions=[], revoked_certificates=[]): + self._issuer_name = issuer_name + self._last_update = last_update + self._next_update = next_update + self._extensions = extensions + self._revoked_certificates = revoked_certificates + + def issuer_name(self, issuer_name): + if not isinstance(issuer_name, Name): + raise TypeError('Expecting x509.Name object.') + if self._issuer_name is not None: + raise ValueError('The issuer name may only be set once.') + return CertificateRevocationListBuilder( + issuer_name, self._last_update, self._next_update, + self._extensions, self._revoked_certificates + ) + + def last_update(self, last_update): + if not isinstance(last_update, datetime.datetime): + raise TypeError('Expecting datetime object.') + if self._last_update is not None: + raise ValueError('Last update may only be set once.') + last_update = _convert_to_naive_utc_time(last_update) + if last_update <= _UNIX_EPOCH: + raise ValueError('The last update date must be after the unix' + ' epoch (1970 January 1).') + if self._next_update is not None and last_update > self._next_update: + raise ValueError( + 'The last update date must be before the next update date.' + ) + return CertificateRevocationListBuilder( + self._issuer_name, last_update, self._next_update, + self._extensions, self._revoked_certificates + ) + + def next_update(self, next_update): + if not isinstance(next_update, datetime.datetime): + raise TypeError('Expecting datetime object.') + if self._next_update is not None: + raise ValueError('Last update may only be set once.') + next_update = _convert_to_naive_utc_time(next_update) + if next_update <= _UNIX_EPOCH: + raise ValueError('The last update date must be after the unix' + ' epoch (1970 January 1).') + if self._last_update is not None and next_update < self._last_update: + raise ValueError( + 'The next update date must be after the last update date.' 
+ ) + return CertificateRevocationListBuilder( + self._issuer_name, self._last_update, next_update, + self._extensions, self._revoked_certificates + ) + + def add_extension(self, extension, critical): + """ + Adds an X.509 extension to the certificate revocation list. + """ + if not isinstance(extension, ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = Extension(extension.oid, critical, extension) + + # TODO: This is quadratic in the number of extensions + for e in self._extensions: + if e.oid == extension.oid: + raise ValueError('This extension has already been set.') + return CertificateRevocationListBuilder( + self._issuer_name, self._last_update, self._next_update, + self._extensions + [extension], self._revoked_certificates + ) + + def add_revoked_certificate(self, revoked_certificate): + """ + Adds a revoked certificate to the CRL. + """ + if not isinstance(revoked_certificate, RevokedCertificate): + raise TypeError("Must be an instance of RevokedCertificate") + + return CertificateRevocationListBuilder( + self._issuer_name, self._last_update, + self._next_update, self._extensions, + self._revoked_certificates + [revoked_certificate] + ) + + def sign(self, private_key, algorithm, backend): + if self._issuer_name is None: + raise ValueError("A CRL must have an issuer name") + + if self._last_update is None: + raise ValueError("A CRL must have a last update time") + + if self._next_update is None: + raise ValueError("A CRL must have a next update time") + + return backend.create_x509_crl(self, private_key, algorithm) + + +class RevokedCertificateBuilder(object): + def __init__(self, serial_number=None, revocation_date=None, + extensions=[]): + self._serial_number = serial_number + self._revocation_date = revocation_date + self._extensions = extensions + + def serial_number(self, number): + if not isinstance(number, six.integer_types): + raise TypeError('Serial number must be of integral type.') + if self._serial_number is not None: + raise ValueError('The serial number may only be set once.') + if number <= 0: + raise ValueError('The serial number should be positive') + + # ASN.1 integers are always signed, so most significant bit must be + # zero. 
+ if number.bit_length() >= 160: # As defined in RFC 5280 + raise ValueError('The serial number should not be more than 159 ' + 'bits.') + return RevokedCertificateBuilder( + number, self._revocation_date, self._extensions + ) + + def revocation_date(self, time): + if not isinstance(time, datetime.datetime): + raise TypeError('Expecting datetime object.') + if self._revocation_date is not None: + raise ValueError('The revocation date may only be set once.') + time = _convert_to_naive_utc_time(time) + if time <= _UNIX_EPOCH: + raise ValueError('The revocation date must be after the unix' + ' epoch (1970 January 1).') + return RevokedCertificateBuilder( + self._serial_number, time, self._extensions + ) + + def add_extension(self, extension, critical): + if not isinstance(extension, ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = Extension(extension.oid, critical, extension) + + # TODO: This is quadratic in the number of extensions + for e in self._extensions: + if e.oid == extension.oid: + raise ValueError('This extension has already been set.') + return RevokedCertificateBuilder( + self._serial_number, self._revocation_date, + self._extensions + [extension] + ) + + def build(self, backend): + if self._serial_number is None: + raise ValueError("A revoked certificate must have a serial number") + if self._revocation_date is None: + raise ValueError( + "A revoked certificate must have a revocation date" + ) + + return backend.create_x509_revoked_certificate(self) + + +def random_serial_number(): + return utils.int_from_bytes(os.urandom(20), "big") >> 1 diff --git a/server/www/packages/packages-common/cryptography/x509/certificate_transparency.py b/server/www/packages/packages-common/cryptography/x509/certificate_transparency.py new file mode 100644 index 0000000..d00fe81 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/x509/certificate_transparency.py @@ -0,0 +1,46 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +from enum import Enum + +import six + + +class LogEntryType(Enum): + X509_CERTIFICATE = 0 + PRE_CERTIFICATE = 1 + + +class Version(Enum): + v1 = 0 + + +@six.add_metaclass(abc.ABCMeta) +class SignedCertificateTimestamp(object): + @abc.abstractproperty + def version(self): + """ + Returns the SCT version. + """ + + @abc.abstractproperty + def log_id(self): + """ + Returns an identifier indicating which log this SCT is for. + """ + + @abc.abstractproperty + def timestamp(self): + """ + Returns the timestamp for this SCT. + """ + + @abc.abstractproperty + def entry_type(self): + """ + Returns whether this is an SCT for a certificate or pre-certificate. + """ diff --git a/server/www/packages/packages-common/cryptography/x509/extensions.py b/server/www/packages/packages-common/cryptography/x509/extensions.py new file mode 100644 index 0000000..eb4b927 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/x509/extensions.py @@ -0,0 +1,1429 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
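Putting the x509.base builders above together: a compact sketch of issuing a self-signed certificate (illustrative only, not part of this diff; the key parameters and lifetime are arbitrary):

    import datetime
    from cryptography import x509
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import rsa
    from cryptography.x509.oid import NameOID

    key = rsa.generate_private_key(65537, 2048, default_backend())
    name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u"example.test")])
    now = datetime.datetime.utcnow()
    cert = (
        x509.CertificateBuilder()
        .subject_name(name)
        .issuer_name(name)  # self-signed, so subject == issuer
        .public_key(key.public_key())
        .serial_number(x509.random_serial_number())  # positive, under 159 bits
        .not_valid_before(now)
        .not_valid_after(now + datetime.timedelta(days=30))
        .sign(key, hashes.SHA256(), default_backend())
    )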
+ +from __future__ import absolute_import, division, print_function + +import abc +import datetime +import hashlib +import ipaddress +from enum import Enum + +from asn1crypto.keys import PublicKeyInfo + +import six + +from cryptography import utils +from cryptography.hazmat.primitives import constant_time, serialization +from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey +from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey +from cryptography.x509.certificate_transparency import ( + SignedCertificateTimestamp +) +from cryptography.x509.general_name import GeneralName, IPAddress, OtherName +from cryptography.x509.name import RelativeDistinguishedName +from cryptography.x509.oid import ( + CRLEntryExtensionOID, ExtensionOID, ObjectIdentifier +) + + +def _key_identifier_from_public_key(public_key): + if isinstance(public_key, RSAPublicKey): + data = public_key.public_bytes( + serialization.Encoding.DER, + serialization.PublicFormat.PKCS1, + ) + elif isinstance(public_key, EllipticCurvePublicKey): + data = public_key.public_numbers().encode_point() + else: + # This is a very slow way to do this. + serialized = public_key.public_bytes( + serialization.Encoding.DER, + serialization.PublicFormat.SubjectPublicKeyInfo + ) + + data = six.binary_type(PublicKeyInfo.load(serialized)['public_key']) + + return hashlib.sha1(data).digest() + + +class DuplicateExtension(Exception): + def __init__(self, msg, oid): + super(DuplicateExtension, self).__init__(msg) + self.oid = oid + + +class ExtensionNotFound(Exception): + def __init__(self, msg, oid): + super(ExtensionNotFound, self).__init__(msg) + self.oid = oid + + +@six.add_metaclass(abc.ABCMeta) +class ExtensionType(object): + @abc.abstractproperty + def oid(self): + """ + Returns the oid associated with the given extension type. + """ + + +class Extensions(object): + def __init__(self, extensions): + self._extensions = extensions + + def get_extension_for_oid(self, oid): + for ext in self: + if ext.oid == oid: + return ext + + raise ExtensionNotFound("No {0} extension was found".format(oid), oid) + + def get_extension_for_class(self, extclass): + if extclass is UnrecognizedExtension: + raise TypeError( + "UnrecognizedExtension can't be used with " + "get_extension_for_class because more than one instance of the" + " class may be present." 
+ ) + + for ext in self: + if isinstance(ext.value, extclass): + return ext + + raise ExtensionNotFound( + "No {0} extension was found".format(extclass), extclass.oid + ) + + def __iter__(self): + return iter(self._extensions) + + def __len__(self): + return len(self._extensions) + + def __getitem__(self, idx): + return self._extensions[idx] + + def __repr__(self): + return ( + "<Extensions({0})>".format(self._extensions) + ) + + +@utils.register_interface(ExtensionType) +class CRLNumber(object): + oid = ExtensionOID.CRL_NUMBER + + def __init__(self, crl_number): + if not isinstance(crl_number, six.integer_types): + raise TypeError("crl_number must be an integer") + + self._crl_number = crl_number + + def __eq__(self, other): + if not isinstance(other, CRLNumber): + return NotImplemented + + return self.crl_number == other.crl_number + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.crl_number) + + def __repr__(self): + return "<CRLNumber({0})>".format(self.crl_number) + + crl_number = utils.read_only_property("_crl_number") + + +@utils.register_interface(ExtensionType) +class AuthorityKeyIdentifier(object): + oid = ExtensionOID.AUTHORITY_KEY_IDENTIFIER + + def __init__(self, key_identifier, authority_cert_issuer, + authority_cert_serial_number): + if (authority_cert_issuer is None) != ( + authority_cert_serial_number is None + ): + raise ValueError( + "authority_cert_issuer and authority_cert_serial_number " + "must both be present or both None" + ) + + if authority_cert_issuer is not None: + authority_cert_issuer = list(authority_cert_issuer) + if not all( + isinstance(x, GeneralName) for x in authority_cert_issuer + ): + raise TypeError( + "authority_cert_issuer must be a list of GeneralName " + "objects" + ) + + if authority_cert_serial_number is not None and not isinstance( + authority_cert_serial_number, six.integer_types + ): + raise TypeError( + "authority_cert_serial_number must be an integer" + ) + + self._key_identifier = key_identifier + self._authority_cert_issuer = authority_cert_issuer + self._authority_cert_serial_number = authority_cert_serial_number + + @classmethod + def from_issuer_public_key(cls, public_key): + digest = _key_identifier_from_public_key(public_key) + return cls( + key_identifier=digest, + authority_cert_issuer=None, + authority_cert_serial_number=None + ) + + @classmethod + def from_issuer_subject_key_identifier(cls, ski): + return cls( + key_identifier=ski.value.digest, + authority_cert_issuer=None, + authority_cert_serial_number=None + ) + + def __repr__(self): + return ( + "<AuthorityKeyIdentifier(key_identifier={0.key_identifier!r}, " + "authority_cert_issuer={0.authority_cert_issuer}, " + "authority_cert_serial_number={0.authority_cert_serial_number}" + ")>".format(self) + ) + + def __eq__(self, other): + if not isinstance(other, AuthorityKeyIdentifier): + return NotImplemented + + return ( + self.key_identifier == other.key_identifier and + self.authority_cert_issuer == other.authority_cert_issuer and + self.authority_cert_serial_number == + other.authority_cert_serial_number + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + if self.authority_cert_issuer is None: + aci = None + else: + aci = tuple(self.authority_cert_issuer) + return hash(( + self.key_identifier, aci, self.authority_cert_serial_number + )) + + key_identifier = utils.read_only_property("_key_identifier") + authority_cert_issuer = utils.read_only_property("_authority_cert_issuer") + authority_cert_serial_number = utils.read_only_property( + "_authority_cert_serial_number" + )
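The two key-identifier extensions are normally derived from keys rather than built by hand. A sketch, assuming ca_key is an issuer private key from this library (hypothetical variable; SubjectKeyIdentifier itself is defined just below):

    from cryptography import x509

    ski = x509.SubjectKeyIdentifier.from_public_key(ca_key.public_key())
    aki = x509.AuthorityKeyIdentifier.from_issuer_public_key(ca_key.public_key())
    # Both call _key_identifier_from_public_key() above, so a child
    # certificate's AKI matches the SKI of its issuing certificate.
    assert aki.key_identifier == ski.digest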
+ + +@utils.register_interface(ExtensionType) +class SubjectKeyIdentifier(object): + oid = ExtensionOID.SUBJECT_KEY_IDENTIFIER + + def __init__(self, digest): + self._digest = digest + + @classmethod + def from_public_key(cls, public_key): + return cls(_key_identifier_from_public_key(public_key)) + + digest = utils.read_only_property("_digest") + + def __repr__(self): + return "<SubjectKeyIdentifier(digest={0!r})>".format(self.digest) + + def __eq__(self, other): + if not isinstance(other, SubjectKeyIdentifier): + return NotImplemented + + return constant_time.bytes_eq(self.digest, other.digest) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.digest) + + +@utils.register_interface(ExtensionType) +class AuthorityInformationAccess(object): + oid = ExtensionOID.AUTHORITY_INFORMATION_ACCESS + + def __init__(self, descriptions): + descriptions = list(descriptions) + if not all(isinstance(x, AccessDescription) for x in descriptions): + raise TypeError( + "Every item in the descriptions list must be an " + "AccessDescription" + ) + + self._descriptions = descriptions + + def __iter__(self): + return iter(self._descriptions) + + def __len__(self): + return len(self._descriptions) + + def __repr__(self): + return "<AuthorityInformationAccess({0})>".format(self._descriptions) + + def __eq__(self, other): + if not isinstance(other, AuthorityInformationAccess): + return NotImplemented + + return self._descriptions == other._descriptions + + def __ne__(self, other): + return not self == other + + def __getitem__(self, idx): + return self._descriptions[idx] + + def __hash__(self): + return hash(tuple(self._descriptions)) + + +class AccessDescription(object): + def __init__(self, access_method, access_location): + if not isinstance(access_method, ObjectIdentifier): + raise TypeError("access_method must be an ObjectIdentifier") + + if not isinstance(access_location, GeneralName): + raise TypeError("access_location must be a GeneralName") + + self._access_method = access_method + self._access_location = access_location + + def __repr__(self): + return ( + "<AccessDescription(access_method={0.access_method}, " + "access_location={0.access_location})>".format(self) + ) + + def __eq__(self, other): + if not isinstance(other, AccessDescription): + return NotImplemented + + return ( + self.access_method == other.access_method and + self.access_location == other.access_location + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.access_method, self.access_location)) + + access_method = utils.read_only_property("_access_method") + access_location = utils.read_only_property("_access_location") + + +@utils.register_interface(ExtensionType) +class BasicConstraints(object): + oid = ExtensionOID.BASIC_CONSTRAINTS + + def __init__(self, ca, path_length): + if not isinstance(ca, bool): + raise TypeError("ca must be a boolean value") + + if path_length is not None and not ca: + raise ValueError("path_length must be None when ca is False") + + if ( + path_length is not None and + (not isinstance(path_length, six.integer_types) or path_length < 0) + ): + raise TypeError( + "path_length must be a non-negative integer or None" + ) + + self._ca = ca + self._path_length = path_length + + ca = utils.read_only_property("_ca") + path_length = utils.read_only_property("_path_length") + + def __repr__(self): + return ("<BasicConstraints(ca={0.ca}, " + "path_length={0.path_length})>").format(self) + + def __eq__(self, other): + if not isinstance(other, BasicConstraints): + return NotImplemented + + return self.ca == other.ca and self.path_length == other.path_length + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.ca, self.path_length))
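BasicConstraints is what separates CA certificates from end-entity ones; per the validation above, the two common forms look like this (illustrative only):

    from cryptography import x509

    ca = x509.BasicConstraints(ca=True, path_length=0)        # CA limited to issuing leaves
    leaf = x509.BasicConstraints(ca=False, path_length=None)  # path_length must be None here
    # Either value would be attached via CertificateBuilder.add_extension(..., critical=True).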
+ + +@utils.register_interface(ExtensionType) +class DeltaCRLIndicator(object): + oid = ExtensionOID.DELTA_CRL_INDICATOR + + def __init__(self, crl_number): + if not isinstance(crl_number, six.integer_types): + raise TypeError("crl_number must be an integer") + + self._crl_number = crl_number + + crl_number = utils.read_only_property("_crl_number") + + def __eq__(self, other): + if not isinstance(other, DeltaCRLIndicator): + return NotImplemented + + return self.crl_number == other.crl_number + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.crl_number) + + def __repr__(self): + return "<DeltaCRLIndicator(crl_number={0.crl_number})>".format(self) + + +@utils.register_interface(ExtensionType) +class CRLDistributionPoints(object): + oid = ExtensionOID.CRL_DISTRIBUTION_POINTS + + def __init__(self, distribution_points): + distribution_points = list(distribution_points) + if not all( + isinstance(x, DistributionPoint) for x in distribution_points + ): + raise TypeError( + "distribution_points must be a list of DistributionPoint " + "objects" + ) + + self._distribution_points = distribution_points + + def __iter__(self): + return iter(self._distribution_points) + + def __len__(self): + return len(self._distribution_points) + + def __repr__(self): + return "<CRLDistributionPoints({0})>".format(self._distribution_points) + + def __eq__(self, other): + if not isinstance(other, CRLDistributionPoints): + return NotImplemented + + return self._distribution_points == other._distribution_points + + def __ne__(self, other): + return not self == other + + def __getitem__(self, idx): + return self._distribution_points[idx] + + def __hash__(self): + return hash(tuple(self._distribution_points)) + + +@utils.register_interface(ExtensionType) +class FreshestCRL(object): + oid = ExtensionOID.FRESHEST_CRL + + def __init__(self, distribution_points): + distribution_points = list(distribution_points) + if not all( + isinstance(x, DistributionPoint) for x in distribution_points + ): + raise TypeError( + "distribution_points must be a list of DistributionPoint " + "objects" + ) + + self._distribution_points = distribution_points + + def __iter__(self): + return iter(self._distribution_points) + + def __len__(self): + return len(self._distribution_points) + + def __repr__(self): + return "<FreshestCRL({0})>".format(self._distribution_points) + + def __eq__(self, other): + if not isinstance(other, FreshestCRL): + return NotImplemented + + return self._distribution_points == other._distribution_points + + def __ne__(self, other): + return not self == other + + def __getitem__(self, idx): + return self._distribution_points[idx] + + def __hash__(self): + return hash(tuple(self._distribution_points))
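A sketch of a typical HTTP CRL distribution point as consumed by CRLDistributionPoints above, using the DistributionPoint class defined just below (the URL is a placeholder):

    from cryptography import x509

    dp = x509.DistributionPoint(
        full_name=[x509.UniformResourceIdentifier(u"http://crl.example.test/ca.crl")],
        relative_name=None,  # mutually exclusive with full_name
        reasons=None,
        crl_issuer=None,
    )
    cdp = x509.CRLDistributionPoints([dp])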
+ + +class DistributionPoint(object): + def __init__(self, full_name, relative_name, reasons, crl_issuer): + if full_name and relative_name: + raise ValueError( + "You cannot provide both full_name and relative_name, at " + "least one must be None." + ) + + if full_name: + full_name = list(full_name) + if not all(isinstance(x, GeneralName) for x in full_name): + raise TypeError( + "full_name must be a list of GeneralName objects" + ) + + if relative_name: + if not isinstance(relative_name, RelativeDistinguishedName): + raise TypeError( + "relative_name must be a RelativeDistinguishedName" + ) + + if crl_issuer: + crl_issuer = list(crl_issuer) + if not all(isinstance(x, GeneralName) for x in crl_issuer): + raise TypeError( + "crl_issuer must be None or a list of general names" + ) + + if reasons and (not isinstance(reasons, frozenset) or not all( + isinstance(x, ReasonFlags) for x in reasons + )): + raise TypeError("reasons must be None or frozenset of ReasonFlags") + + if reasons and ( + ReasonFlags.unspecified in reasons or + ReasonFlags.remove_from_crl in reasons + ): + raise ValueError( + "unspecified and remove_from_crl are not valid reasons in a " + "DistributionPoint" + ) + + if reasons and not crl_issuer and not (full_name or relative_name): + raise ValueError( + "You must supply crl_issuer, full_name, or relative_name when " + "reasons is not None" + ) + + self._full_name = full_name + self._relative_name = relative_name + self._reasons = reasons + self._crl_issuer = crl_issuer + + def __repr__(self): + return ( + "<DistributionPoint(full_name={0.full_name}, " + "relative_name={0.relative_name}, reasons={0.reasons}, " + "crl_issuer={0.crl_issuer})>".format(self) + ) + + def __eq__(self, other): + if not isinstance(other, DistributionPoint): + return NotImplemented + + return ( + self.full_name == other.full_name and + self.relative_name == other.relative_name and + self.reasons == other.reasons and + self.crl_issuer == other.crl_issuer + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + if self.full_name is not None: + fn = tuple(self.full_name) + else: + fn = None + + if self.crl_issuer is not None: + crl_issuer = tuple(self.crl_issuer) + else: + crl_issuer = None + + return hash((fn, self.relative_name, self.reasons, crl_issuer)) + + full_name = utils.read_only_property("_full_name") + relative_name = utils.read_only_property("_relative_name") + reasons = utils.read_only_property("_reasons") + crl_issuer = utils.read_only_property("_crl_issuer") + + +class ReasonFlags(Enum): + unspecified = "unspecified" + key_compromise = "keyCompromise" + ca_compromise = "cACompromise" + affiliation_changed = "affiliationChanged" + superseded = "superseded" + cessation_of_operation = "cessationOfOperation" + certificate_hold = "certificateHold" + privilege_withdrawn = "privilegeWithdrawn" + aa_compromise = "aACompromise" + remove_from_crl = "removeFromCRL"
+ + +@utils.register_interface(ExtensionType) +class PolicyConstraints(object): + oid = ExtensionOID.POLICY_CONSTRAINTS + + def __init__(self, require_explicit_policy, inhibit_policy_mapping): + if require_explicit_policy is not None and not isinstance( + require_explicit_policy, six.integer_types + ): + raise TypeError( + "require_explicit_policy must be a non-negative integer or " + "None" + ) + + if inhibit_policy_mapping is not None and not isinstance( + inhibit_policy_mapping, six.integer_types + ): + raise TypeError( + "inhibit_policy_mapping must be a non-negative integer or None" + ) + + if inhibit_policy_mapping is None and require_explicit_policy is None: + raise ValueError( + "At least one of require_explicit_policy and " + "inhibit_policy_mapping must not be None" + ) + + self._require_explicit_policy = require_explicit_policy + self._inhibit_policy_mapping = inhibit_policy_mapping + + def __repr__(self): + return ( + u"<PolicyConstraints(require_explicit_policy={0.require_explicit" + u"_policy}, inhibit_policy_mapping={0.inhibit_policy_mapping})>" + .format(self) + ) + + def __eq__(self, other): + if not isinstance(other, PolicyConstraints): + return NotImplemented + + return ( + self.require_explicit_policy == other.require_explicit_policy and + self.inhibit_policy_mapping == other.inhibit_policy_mapping + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash( + (self.require_explicit_policy, self.inhibit_policy_mapping) + ) + + require_explicit_policy = utils.read_only_property( + "_require_explicit_policy" + ) + inhibit_policy_mapping = utils.read_only_property( + "_inhibit_policy_mapping" + ) + + +@utils.register_interface(ExtensionType) +class CertificatePolicies(object): + oid = ExtensionOID.CERTIFICATE_POLICIES + + def __init__(self, policies): + policies = list(policies) + if not all(isinstance(x, PolicyInformation) for x in policies): + raise TypeError( + "Every item in the policies list must be a " + "PolicyInformation" + ) + + self._policies = policies + + def __iter__(self): + return iter(self._policies) + + def __len__(self): + return len(self._policies) + + def __repr__(self): + return "<CertificatePolicies({0})>".format(self._policies) + + def __eq__(self, other): + if not isinstance(other, CertificatePolicies): + return NotImplemented + + return self._policies == other._policies + + def __ne__(self, other): + return not self == other + + def __getitem__(self, idx): + return self._policies[idx] + + def __hash__(self): + return hash(tuple(self._policies)) + + +class PolicyInformation(object): + def __init__(self, policy_identifier, policy_qualifiers): + if not isinstance(policy_identifier, ObjectIdentifier): + raise TypeError("policy_identifier must be an ObjectIdentifier") + + self._policy_identifier = policy_identifier + + if policy_qualifiers: + policy_qualifiers = list(policy_qualifiers) + if not all( + isinstance(x, (six.text_type, UserNotice)) + for x in policy_qualifiers + ): + raise TypeError( + "policy_qualifiers must be a list of strings and/or " + "UserNotice objects or None" + ) + + self._policy_qualifiers = policy_qualifiers + + def __repr__(self): + return ( + "<PolicyInformation(policy_identifier={0.policy_identifier}, " + "policy_qualifiers={0.policy_qualifiers})>".format(self) + ) + + def __eq__(self, other): + if not isinstance(other, PolicyInformation): + return NotImplemented + + return ( + self.policy_identifier == other.policy_identifier and + self.policy_qualifiers == other.policy_qualifiers + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + if self.policy_qualifiers is not None: + pq = tuple(self.policy_qualifiers) + else: + pq = None + + return hash((self.policy_identifier, pq)) + + policy_identifier = utils.read_only_property("_policy_identifier") + policy_qualifiers = utils.read_only_property("_policy_qualifiers") + + +class UserNotice(object): + def __init__(self, notice_reference, explicit_text): + if notice_reference and not isinstance( + notice_reference, NoticeReference + ): + raise TypeError( + "notice_reference must be None or a NoticeReference" + ) + + self._notice_reference = notice_reference + self._explicit_text = explicit_text + + def __repr__(self): + return ( + "<UserNotice(notice_reference={0.notice_reference}, " + "explicit_text={0.explicit_text!r})>".format(self) + ) + + def __eq__(self, other): + if not isinstance(other, UserNotice): + return NotImplemented + + return ( + self.notice_reference == other.notice_reference and + self.explicit_text == other.explicit_text + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.notice_reference, self.explicit_text)) + + notice_reference = utils.read_only_property("_notice_reference") + explicit_text = utils.read_only_property("_explicit_text")
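A sketch of a certificate-policies value combining a CPS URI string with a UserNotice, as accepted by PolicyInformation above (the OID shown is the CA/Browser Forum domain-validated policy identifier; the strings are placeholders):

    from cryptography import x509

    policies = x509.CertificatePolicies([
        x509.PolicyInformation(
            x509.ObjectIdentifier("2.23.140.1.2.1"),
            [
                u"https://example.test/cps",
                x509.UserNotice(notice_reference=None,
                                explicit_text=u"For example use only"),
            ],
        )
    ])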
+ + +class NoticeReference(object): + def __init__(self, organization, notice_numbers): + self._organization = organization + notice_numbers = list(notice_numbers) + if not all(isinstance(x, int) for x in notice_numbers): + raise TypeError( + "notice_numbers must be a list of integers" + ) + + self._notice_numbers = notice_numbers + + def __repr__(self): + return ( + "<NoticeReference(organization={0.organization!r}, " + "notice_numbers={0.notice_numbers})>".format(self) + ) + + def __eq__(self, other): + if not isinstance(other, NoticeReference): + return NotImplemented + + return ( + self.organization == other.organization and + self.notice_numbers == other.notice_numbers + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.organization, tuple(self.notice_numbers))) + + organization = utils.read_only_property("_organization") + notice_numbers = utils.read_only_property("_notice_numbers") + + +@utils.register_interface(ExtensionType) +class ExtendedKeyUsage(object): + oid = ExtensionOID.EXTENDED_KEY_USAGE + + def __init__(self, usages): + usages = list(usages) + if not all(isinstance(x, ObjectIdentifier) for x in usages): + raise TypeError( + "Every item in the usages list must be an ObjectIdentifier" + ) + + self._usages = usages + + def __iter__(self): + return iter(self._usages) + + def __len__(self): + return len(self._usages) + + def __repr__(self): + return "<ExtendedKeyUsage({0})>".format(self._usages) + + def __eq__(self, other): + if not isinstance(other, ExtendedKeyUsage): + return NotImplemented + + return self._usages == other._usages + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(tuple(self._usages)) + + +@utils.register_interface(ExtensionType) +class OCSPNoCheck(object): + oid = ExtensionOID.OCSP_NO_CHECK + + +@utils.register_interface(ExtensionType) +class TLSFeature(object): + oid = ExtensionOID.TLS_FEATURE + + def __init__(self, features): + features = list(features) + if ( + not all(isinstance(x, TLSFeatureType) for x in features) or + len(features) == 0 + ): + raise TypeError( + "features must be a list of elements from the TLSFeatureType " + "enum" + ) + + self._features = features + + def __iter__(self): + return iter(self._features) + + def __len__(self): + return len(self._features) + + def __repr__(self): + return "<TLSFeature(features={0._features})>".format(self) + + def __eq__(self, other): + if not isinstance(other, TLSFeature): + return NotImplemented + + return self._features == other._features + + def __getitem__(self, idx): + return self._features[idx] + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(tuple(self._features))
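TLSFeature carrying status_request is the common "OCSP Must-Staple" marker; a sketch using the TLSFeatureType enum defined just below (illustrative only):

    from cryptography import x509

    must_staple = x509.TLSFeature([x509.TLSFeatureType.status_request])
    # Typically attached with CertificateBuilder.add_extension(must_staple, critical=False).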
+ + +class TLSFeatureType(Enum): + # status_request is defined in RFC 6066 and is used for what is commonly + # called OCSP Must-Staple when present in the TLS Feature extension in an + # X.509 certificate. + status_request = 5 + # status_request_v2 is defined in RFC 6961 and allows multiple OCSP + # responses to be provided. It is not currently in use by clients or + # servers. + status_request_v2 = 17 + + +_TLS_FEATURE_TYPE_TO_ENUM = dict((x.value, x) for x in TLSFeatureType) + + +@utils.register_interface(ExtensionType) +class InhibitAnyPolicy(object): + oid = ExtensionOID.INHIBIT_ANY_POLICY + + def __init__(self, skip_certs): + if not isinstance(skip_certs, six.integer_types): + raise TypeError("skip_certs must be an integer") + + if skip_certs < 0: + raise ValueError("skip_certs must be a non-negative integer") + + self._skip_certs = skip_certs + + def __repr__(self): + return "<InhibitAnyPolicy(skip_certs={0.skip_certs})>".format(self) + + def __eq__(self, other): + if not isinstance(other, InhibitAnyPolicy): + return NotImplemented + + return self.skip_certs == other.skip_certs + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.skip_certs) + + skip_certs = utils.read_only_property("_skip_certs") + + +@utils.register_interface(ExtensionType) +class KeyUsage(object): + oid = ExtensionOID.KEY_USAGE + + def __init__(self, digital_signature, content_commitment, key_encipherment, + data_encipherment, key_agreement, key_cert_sign, crl_sign, + encipher_only, decipher_only): + if not key_agreement and (encipher_only or decipher_only): + raise ValueError( + "encipher_only and decipher_only can only be true when " + "key_agreement is true" + ) + + self._digital_signature = digital_signature + self._content_commitment = content_commitment + self._key_encipherment = key_encipherment + self._data_encipherment = data_encipherment + self._key_agreement = key_agreement + self._key_cert_sign = key_cert_sign + self._crl_sign = crl_sign + self._encipher_only = encipher_only + self._decipher_only = decipher_only + + digital_signature = utils.read_only_property("_digital_signature") + content_commitment = utils.read_only_property("_content_commitment") + key_encipherment = utils.read_only_property("_key_encipherment") + data_encipherment = utils.read_only_property("_data_encipherment") + key_agreement = utils.read_only_property("_key_agreement") + key_cert_sign = utils.read_only_property("_key_cert_sign") + crl_sign = utils.read_only_property("_crl_sign") + + @property + def encipher_only(self): + if not self.key_agreement: + raise ValueError( + "encipher_only is undefined unless key_agreement is true" + ) + else: + return self._encipher_only + + @property + def decipher_only(self): + if not self.key_agreement: + raise ValueError( + "decipher_only is undefined unless key_agreement is true" + ) + else: + return self._decipher_only + + def __repr__(self): + try: + encipher_only = self.encipher_only + decipher_only = self.decipher_only + except ValueError: + encipher_only = None + decipher_only = None + + return ("<KeyUsage(digital_signature={0.digital_signature}, " + "content_commitment={0.content_commitment}, " + "key_encipherment={0.key_encipherment}, " + "data_encipherment={0.data_encipherment}, " + "key_agreement={0.key_agreement}, " + "key_cert_sign={0.key_cert_sign}, crl_sign={0.crl_sign}, " + "encipher_only={1}, decipher_only={2})>").format( + self, encipher_only, decipher_only) + + def __eq__(self, other): + if not isinstance(other, KeyUsage): + return NotImplemented + + return ( + self.digital_signature == other.digital_signature and + self.content_commitment == other.content_commitment and + self.key_encipherment == other.key_encipherment and + self.data_encipherment == other.data_encipherment and + self.key_agreement == other.key_agreement and + self.key_cert_sign == other.key_cert_sign and + self.crl_sign == other.crl_sign and + self._encipher_only == other._encipher_only and + self._decipher_only == other._decipher_only + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(( + self.digital_signature, self.content_commitment, + self.key_encipherment, self.data_encipherment, + self.key_agreement, self.key_cert_sign, + self.crl_sign, self._encipher_only, + self._decipher_only + ))
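All nine KeyUsage flags are required arguments; a sketch of the combination a certificate-signing CA typically asserts (illustrative only):

    from cryptography import x509

    ca_usage = x509.KeyUsage(
        digital_signature=False, content_commitment=False,
        key_encipherment=False, data_encipherment=False,
        key_agreement=False, key_cert_sign=True, crl_sign=True,
        encipher_only=False, decipher_only=False,
    )
    # encipher_only/decipher_only may only be True when key_agreement is True,
    # and the properties above raise ValueError when read otherwise.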
+@utils.register_interface(ExtensionType) +class NameConstraints(object): + oid = ExtensionOID.NAME_CONSTRAINTS + + def __init__(self, permitted_subtrees, excluded_subtrees): + if permitted_subtrees is not None: + permitted_subtrees = list(permitted_subtrees) + if not all( + isinstance(x, GeneralName) for x in permitted_subtrees + ): + raise TypeError( + "permitted_subtrees must be a list of GeneralName objects " + "or None" + ) + + self._validate_ip_name(permitted_subtrees) + + if excluded_subtrees is not None: + excluded_subtrees = list(excluded_subtrees) + if not all( + isinstance(x, GeneralName) for x in excluded_subtrees + ): + raise TypeError( + "excluded_subtrees must be a list of GeneralName objects " + "or None" + ) + + self._validate_ip_name(excluded_subtrees) + + if permitted_subtrees is None and excluded_subtrees is None: + raise ValueError( + "At least one of permitted_subtrees and excluded_subtrees " + "must not be None" + ) + + self._permitted_subtrees = permitted_subtrees + self._excluded_subtrees = excluded_subtrees + + def __eq__(self, other): + if not isinstance(other, NameConstraints): + return NotImplemented + + return ( + self.excluded_subtrees == other.excluded_subtrees and + self.permitted_subtrees == other.permitted_subtrees + ) + + def __ne__(self, other): + return not self == other + + def _validate_ip_name(self, tree): + if any(isinstance(name, IPAddress) and not isinstance( + name.value, (ipaddress.IPv4Network, ipaddress.IPv6Network) + ) for name in tree): + raise TypeError( + "IPAddress name constraints must be an IPv4Network or" + " IPv6Network object" + ) + + def __repr__(self): + return ( + u"<NameConstraints(permitted_subtrees={0.permitted_subtrees}, " + u"excluded_subtrees={0.excluded_subtrees})>".format(self) + ) + + def __hash__(self): + if self.permitted_subtrees is not None: + ps = tuple(self.permitted_subtrees) + else: + ps = None + + if self.excluded_subtrees is not None: + es = tuple(self.excluded_subtrees) + else: + es = None + + return hash((ps, es)) + + permitted_subtrees = utils.read_only_property("_permitted_subtrees") + excluded_subtrees = utils.read_only_property("_excluded_subtrees")
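As _validate_ip_name above enforces, IPAddress entries inside name constraints must be IPv4Network/IPv6Network objects rather than single addresses. A sketch of a CA-style constraint (hostname and range are placeholders):

    import ipaddress
    from cryptography import x509

    nc = x509.NameConstraints(
        permitted_subtrees=[
            x509.DNSName(u"example.com"),
            # A network, not an address -- an IPv4Address here raises TypeError.
            x509.IPAddress(ipaddress.IPv4Network(u"192.0.2.0/24")),
        ],
        excluded_subtrees=None,
    )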
+class Extension(object): + def __init__(self, oid, critical, value): + if not isinstance(oid, ObjectIdentifier): + raise TypeError( + "oid argument must be an ObjectIdentifier instance." + ) + + if not isinstance(critical, bool): + raise TypeError("critical must be a boolean value") + + self._oid = oid + self._critical = critical + self._value = value + + oid = utils.read_only_property("_oid") + critical = utils.read_only_property("_critical") + value = utils.read_only_property("_value") + + def __repr__(self): + return ("<Extension(oid={0.oid}, critical={0.critical}, " + "value={0.value})>").format(self) + + def __eq__(self, other): + if not isinstance(other, Extension): + return NotImplemented + + return ( + self.oid == other.oid and + self.critical == other.critical and + self.value == other.value + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.oid, self.critical, self.value)) + + +class GeneralNames(object): + def __init__(self, general_names): + general_names = list(general_names) + if not all(isinstance(x, GeneralName) for x in general_names): + raise TypeError( + "Every item in the general_names list must be an " + "object conforming to the GeneralName interface" + ) + + self._general_names = general_names + + def __iter__(self): + return iter(self._general_names) + + def __len__(self): + return len(self._general_names) + + def get_values_for_type(self, type): + # Return the value of each GeneralName, except for OtherName + # instances, which are returned whole because they carry two + # important properties (type_id and value), not just one value. + objs = (i for i in self if isinstance(i, type)) + if type != OtherName: + objs = (i.value for i in objs) + return list(objs) + + def __repr__(self): + return "<GeneralNames({0})>".format(self._general_names) + + def __eq__(self, other): + if not isinstance(other, GeneralNames): + return NotImplemented + + return self._general_names == other._general_names + + def __ne__(self, other): + return not self == other + + def __getitem__(self, idx): + return self._general_names[idx] + + def __hash__(self): + return hash(tuple(self._general_names)) + + +@utils.register_interface(ExtensionType) +class SubjectAlternativeName(object): + oid = ExtensionOID.SUBJECT_ALTERNATIVE_NAME + + def __init__(self, general_names): + self._general_names = GeneralNames(general_names) + + def __iter__(self): + return iter(self._general_names) + + def __len__(self): + return len(self._general_names) + + def get_values_for_type(self, type): + return self._general_names.get_values_for_type(type) + + def __repr__(self): + return "<SubjectAlternativeName({0})>".format(self._general_names) + + def __eq__(self, other): + if not isinstance(other, SubjectAlternativeName): + return NotImplemented + + return self._general_names == other._general_names + + def __getitem__(self, idx): + return self._general_names[idx] + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self._general_names)
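A brief sketch of SubjectAlternativeName and get_values_for_type working together (all names below are placeholders):

    import ipaddress
    from cryptography import x509

    san = x509.SubjectAlternativeName([
        x509.DNSName(u"example.com"),
        x509.DNSName(u"www.example.com"),
        x509.IPAddress(ipaddress.ip_address(u"192.0.2.1")),
    ])
    san.get_values_for_type(x509.DNSName)    # [u'example.com', u'www.example.com']
    san.get_values_for_type(x509.IPAddress)  # [IPv4Address(u'192.0.2.1')]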
+@utils.register_interface(ExtensionType) +class IssuerAlternativeName(object): + oid = ExtensionOID.ISSUER_ALTERNATIVE_NAME + + def __init__(self, general_names): + self._general_names = GeneralNames(general_names) + + def __iter__(self): + return iter(self._general_names) + + def __len__(self): + return len(self._general_names) + + def get_values_for_type(self, type): + return self._general_names.get_values_for_type(type) + + def __repr__(self): + return "<IssuerAlternativeName({0})>".format(self._general_names) + + def __eq__(self, other): + if not isinstance(other, IssuerAlternativeName): + return NotImplemented + + return self._general_names == other._general_names + + def __ne__(self, other): + return not self == other + + def __getitem__(self, idx): + return self._general_names[idx] + + def __hash__(self): + return hash(self._general_names) + + +@utils.register_interface(ExtensionType) +class CertificateIssuer(object): + oid = CRLEntryExtensionOID.CERTIFICATE_ISSUER + + def __init__(self, general_names): + self._general_names = GeneralNames(general_names) + + def __iter__(self): + return iter(self._general_names) + + def __len__(self): + return len(self._general_names) + + def get_values_for_type(self, type): + return self._general_names.get_values_for_type(type) + + def __repr__(self): + return "<CertificateIssuer({0})>".format(self._general_names) + + def __eq__(self, other): + if not isinstance(other, CertificateIssuer): + return NotImplemented + + return self._general_names == other._general_names + + def __ne__(self, other): + return not self == other + + def __getitem__(self, idx): + return self._general_names[idx] + + def __hash__(self): + return hash(self._general_names) + + +@utils.register_interface(ExtensionType) +class CRLReason(object): + oid = CRLEntryExtensionOID.CRL_REASON + + def __init__(self, reason): + if not isinstance(reason, ReasonFlags): + raise TypeError("reason must be an element from ReasonFlags") + + self._reason = reason + + def __repr__(self): + return "<CRLReason(reason={0})>".format(self._reason) + + def __eq__(self, other): + if not isinstance(other, CRLReason): + return NotImplemented + + return self.reason == other.reason + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.reason) + + reason = utils.read_only_property("_reason") + + +@utils.register_interface(ExtensionType) +class InvalidityDate(object): + oid = CRLEntryExtensionOID.INVALIDITY_DATE + + def __init__(self, invalidity_date): + if not isinstance(invalidity_date, datetime.datetime): + raise TypeError("invalidity_date must be a datetime.datetime") + + self._invalidity_date = invalidity_date + + def __repr__(self): + return "<InvalidityDate(invalidity_date={0})>".format( + self._invalidity_date + ) + + def __eq__(self, other): + if not isinstance(other, InvalidityDate): + return NotImplemented + + return self.invalidity_date == other.invalidity_date + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.invalidity_date) + + invalidity_date = utils.read_only_property("_invalidity_date") + + +@utils.register_interface(ExtensionType) +class PrecertificateSignedCertificateTimestamps(object): + oid = ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS + + def __init__(self, signed_certificate_timestamps): + signed_certificate_timestamps = list(signed_certificate_timestamps) + if not all( + isinstance(sct, SignedCertificateTimestamp) + for sct in signed_certificate_timestamps + ): + raise TypeError( + "Every item in the signed_certificate_timestamps list must be " + "a SignedCertificateTimestamp" + ) + self._signed_certificate_timestamps = signed_certificate_timestamps + + def __iter__(self): + return iter(self._signed_certificate_timestamps) + + def __len__(self): + return len(self._signed_certificate_timestamps) + + def __getitem__(self, idx): + return self._signed_certificate_timestamps[idx] + + def __repr__(self): + return ( + "<PrecertificateSignedCertificateTimestamps({0})>".format( + list(self) + ) + ) + + +@utils.register_interface(ExtensionType) +class UnrecognizedExtension(object): + def __init__(self, oid, value): + if not isinstance(oid, ObjectIdentifier): + raise TypeError("oid must be an ObjectIdentifier") + self._oid = oid + self._value = value + + oid = utils.read_only_property("_oid") + value = utils.read_only_property("_value") + + def __repr__(self): + return ( + "<UnrecognizedExtension(oid={0.oid}, value={0.value!r})>".format( + self + ) + ) + + def __eq__(self, other): + if not isinstance(other, UnrecognizedExtension): + return NotImplemented + + return self.oid == other.oid and self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.oid, self.value))
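In applications these extension classes are usually reached through a parsed certificate's extensions collection rather than constructed by hand. A hedged sketch, assuming pem_bytes is a hypothetical variable holding a PEM-encoded certificate:

    from cryptography import x509
    from cryptography.hazmat.backends import default_backend

    cert = x509.load_pem_x509_certificate(pem_bytes, default_backend())
    try:
        ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName)
        dns_names = ext.value.get_values_for_type(x509.DNSName)
    except x509.ExtensionNotFound:
        dns_names = []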
diff --git a/server/www/packages/packages-common/cryptography/x509/general_name.py b/server/www/packages/packages-common/cryptography/x509/general_name.py new file mode 100644 index 0000000..26f389a --- /dev/null +++ b/server/www/packages/packages-common/cryptography/x509/general_name.py @@ -0,0 +1,345 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +import ipaddress +import warnings +from email.utils import parseaddr + +import idna + +import six +from six.moves import urllib_parse + +from cryptography import utils +from cryptography.x509.name import Name +from cryptography.x509.oid import ObjectIdentifier + + +_GENERAL_NAMES = { + 0: "otherName", + 1: "rfc822Name", + 2: "dNSName", + 3: "x400Address", + 4: "directoryName", + 5: "ediPartyName", + 6: "uniformResourceIdentifier", + 7: "iPAddress", + 8: "registeredID", +} + + +class UnsupportedGeneralNameType(Exception): + def __init__(self, msg, type): + super(UnsupportedGeneralNameType, self).__init__(msg) + self.type = type + + +@six.add_metaclass(abc.ABCMeta) +class GeneralName(object): + @abc.abstractproperty + def value(self): + """ + Return the value of the object + """ + + +@utils.register_interface(GeneralName) +class RFC822Name(object): + def __init__(self, value): + if isinstance(value, six.text_type): + try: + value.encode("ascii") + except UnicodeEncodeError: + value = self._idna_encode(value) + warnings.warn( + "RFC822Name values should be passed as an A-label string. " + "This means unicode characters should be encoded via " + "idna. Support for passing unicode strings (aka U-label) " + "will be removed in a future version.", + utils.DeprecatedIn21, + stacklevel=2, + ) + else: + raise TypeError("value must be string") + + name, address = parseaddr(value) + if name or not address: + # parseaddr has found a name (e.g. Name <email>) or the entire + # value is an empty string. + raise ValueError("Invalid rfc822name value") + + self._value = value + + value = utils.read_only_property("_value") + + @classmethod + def _init_without_validation(cls, value): + instance = cls.__new__(cls) + instance._value = value + return instance + + def _idna_encode(self, value): + _, address = parseaddr(value) + parts = address.split(u"@") + return parts[0] + "@" + idna.encode(parts[1]).decode("ascii") + + def __repr__(self): + return "<RFC822Name(value={0})>".format(self.value) + + def __eq__(self, other): + if not isinstance(other, RFC822Name): + return NotImplemented + + return self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.value)
+def _idna_encode(value): + # Retain prefixes '*.' for common/alt names and '.' for name constraints + for prefix in ['*.', '.']: + if value.startswith(prefix): + value = value[len(prefix):] + return prefix + idna.encode(value).decode("ascii") + return idna.encode(value).decode("ascii") + + +@utils.register_interface(GeneralName) +class DNSName(object): + def __init__(self, value): + if isinstance(value, six.text_type): + try: + value.encode("ascii") + except UnicodeEncodeError: + value = _idna_encode(value) + warnings.warn( + "DNSName values should be passed as an A-label string. " + "This means unicode characters should be encoded via " + "idna. Support for passing unicode strings (aka U-label) " + "will be removed in a future version.", + utils.DeprecatedIn21, + stacklevel=2, + ) + else: + raise TypeError("value must be string") + + self._value = value + + value = utils.read_only_property("_value") + + @classmethod + def _init_without_validation(cls, value): + instance = cls.__new__(cls) + instance._value = value + return instance + + def __repr__(self): + return "<DNSName(value={0})>".format(self.value) + + def __eq__(self, other): + if not isinstance(other, DNSName): + return NotImplemented + + return self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.value)
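Per the deprecation warning above, callers should IDNA-encode internationalized names to their A-label form before constructing a DNSName. A minimal sketch; the unicode hostname is a placeholder:

    import idna
    from cryptography import x509

    # Pre-encoding avoids the DeprecatedIn21 warning path in __init__.
    name = x509.DNSName(idna.encode(u"b\u00fccher.example").decode("ascii"))
    name.value  # u'xn--bcher-kva.example'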
+@utils.register_interface(GeneralName) +class UniformResourceIdentifier(object): + def __init__(self, value): + if isinstance(value, six.text_type): + try: + value.encode("ascii") + except UnicodeEncodeError: + value = self._idna_encode(value) + warnings.warn( + "URI values should be passed as an A-label string. " + "This means unicode characters should be encoded via " + "idna. Support for passing unicode strings (aka U-label) " + "will be removed in a future version.", + utils.DeprecatedIn21, + stacklevel=2, + ) + else: + raise TypeError("value must be string") + + self._value = value + + value = utils.read_only_property("_value") + + @classmethod + def _init_without_validation(cls, value): + instance = cls.__new__(cls) + instance._value = value + return instance + + def _idna_encode(self, value): + parsed = urllib_parse.urlparse(value) + if parsed.port: + netloc = ( + idna.encode(parsed.hostname) + + ":{0}".format(parsed.port).encode("ascii") + ).decode("ascii") + else: + netloc = idna.encode(parsed.hostname).decode("ascii") + + # Note that building a URL in this fashion means it should be + # semantically indistinguishable from the original but is not + # guaranteed to be exactly the same. + return urllib_parse.urlunparse(( + parsed.scheme, + netloc, + parsed.path, + parsed.params, + parsed.query, + parsed.fragment + )) + + def __repr__(self): + return "<UniformResourceIdentifier(value={0})>".format(self.value) + + def __eq__(self, other): + if not isinstance(other, UniformResourceIdentifier): + return NotImplemented + + return self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.value) + + +@utils.register_interface(GeneralName) +class DirectoryName(object): + def __init__(self, value): + if not isinstance(value, Name): + raise TypeError("value must be a Name") + + self._value = value + + value = utils.read_only_property("_value") + + def __repr__(self): + return "<DirectoryName(value={0})>".format(self.value) + + def __eq__(self, other): + if not isinstance(other, DirectoryName): + return NotImplemented + + return self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.value) + + +@utils.register_interface(GeneralName) +class RegisteredID(object): + def __init__(self, value): + if not isinstance(value, ObjectIdentifier): + raise TypeError("value must be an ObjectIdentifier") + + self._value = value + + value = utils.read_only_property("_value") + + def __repr__(self): + return "<RegisteredID(value={0})>".format(self.value) + + def __eq__(self, other): + if not isinstance(other, RegisteredID): + return NotImplemented + + return self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.value) + + +@utils.register_interface(GeneralName) +class IPAddress(object): + def __init__(self, value): + if not isinstance( + value, + ( + ipaddress.IPv4Address, + ipaddress.IPv6Address, + ipaddress.IPv4Network, + ipaddress.IPv6Network + ) + ): + raise TypeError( + "value must be an instance of ipaddress.IPv4Address, " + "ipaddress.IPv6Address, ipaddress.IPv4Network, or " + "ipaddress.IPv6Network" + ) + + self._value = value + + value = utils.read_only_property("_value") + + def __repr__(self): + return "<IPAddress(value={0})>".format(self.value) + + def __eq__(self, other): + if not isinstance(other, IPAddress): + return NotImplemented + + return self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.value) + + +@utils.register_interface(GeneralName) +class OtherName(object): + def __init__(self, type_id, value): + if not isinstance(type_id, ObjectIdentifier): + raise TypeError("type_id must be an ObjectIdentifier") + if not isinstance(value, bytes): + raise TypeError("value must be a binary string") + + self._type_id = type_id + self._value = value + + type_id = utils.read_only_property("_type_id") + value = utils.read_only_property("_value") + + def __repr__(self): + return "<OtherName(type_id={0}, value={1!r})>".format( + self.type_id, self.value) + + def __eq__(self, other): + if not isinstance(other, OtherName): + return NotImplemented + + return self.type_id == other.type_id and self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.type_id, self.value)) diff --git a/server/www/packages/packages-common/cryptography/x509/name.py b/server/www/packages/packages-common/cryptography/x509/name.py new file mode 100644 index 0000000..5548eda --- /dev/null +++ b/server/www/packages/packages-common/cryptography/x509/name.py @@ -0,0 +1,190 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License.
See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from enum import Enum + +import six + +from cryptography import utils +from cryptography.x509.oid import NameOID, ObjectIdentifier + + +class _ASN1Type(Enum): + UTF8String = 12 + NumericString = 18 + PrintableString = 19 + T61String = 20 + IA5String = 22 + UTCTime = 23 + GeneralizedTime = 24 + VisibleString = 26 + UniversalString = 28 + BMPString = 30 + + +_ASN1_TYPE_TO_ENUM = dict((i.value, i) for i in _ASN1Type) +_SENTINEL = object() +_NAMEOID_DEFAULT_TYPE = { + NameOID.COUNTRY_NAME: _ASN1Type.PrintableString, + NameOID.JURISDICTION_COUNTRY_NAME: _ASN1Type.PrintableString, + NameOID.SERIAL_NUMBER: _ASN1Type.PrintableString, + NameOID.DN_QUALIFIER: _ASN1Type.PrintableString, + NameOID.EMAIL_ADDRESS: _ASN1Type.IA5String, + NameOID.DOMAIN_COMPONENT: _ASN1Type.IA5String, +} + + +class NameAttribute(object): + def __init__(self, oid, value, _type=_SENTINEL): + if not isinstance(oid, ObjectIdentifier): + raise TypeError( + "oid argument must be an ObjectIdentifier instance." + ) + + if not isinstance(value, six.text_type): + raise TypeError( + "value argument must be a text type." + ) + + if ( + oid == NameOID.COUNTRY_NAME or + oid == NameOID.JURISDICTION_COUNTRY_NAME + ): + if len(value.encode("utf8")) != 2: + raise ValueError( + "Country name must be a 2 character country code" + ) + + if len(value) == 0: + raise ValueError("Value cannot be an empty string") + + # The appropriate ASN1 string type varies by OID and is defined across + # multiple RFCs including 2459, 3280, and 5280. In general UTF8String + # is preferred (2459), but 3280 and 5280 specify several OIDs with + # alternate types. This means when we see the sentinel value we need + # to look up whether the OID has a non-UTF8 type. If it does, set it + # to that. Otherwise, UTF8! 
+ if _type == _SENTINEL: + _type = _NAMEOID_DEFAULT_TYPE.get(oid, _ASN1Type.UTF8String) + + if not isinstance(_type, _ASN1Type): + raise TypeError("_type must be from the _ASN1Type enum") + + self._oid = oid + self._value = value + self._type = _type + + oid = utils.read_only_property("_oid") + value = utils.read_only_property("_value") + + def __eq__(self, other): + if not isinstance(other, NameAttribute): + return NotImplemented + + return ( + self.oid == other.oid and + self.value == other.value + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.oid, self.value)) + + def __repr__(self): + return "<NameAttribute(oid={0.oid}, value={0.value!r})>".format(self) + + +class RelativeDistinguishedName(object): + def __init__(self, attributes): + attributes = list(attributes) + if not attributes: + raise ValueError("a relative distinguished name cannot be empty") + if not all(isinstance(x, NameAttribute) for x in attributes): + raise TypeError("attributes must be an iterable of NameAttribute") + + # Keep list and frozenset to preserve attribute order where it matters + self._attributes = attributes + self._attribute_set = frozenset(attributes) + + if len(self._attribute_set) != len(attributes): + raise ValueError("duplicate attributes are not allowed") + + def get_attributes_for_oid(self, oid): + return [i for i in self if i.oid == oid] + + def __eq__(self, other): + if not isinstance(other, RelativeDistinguishedName): + return NotImplemented + + return self._attribute_set == other._attribute_set + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self._attribute_set) + + def __iter__(self): + return iter(self._attributes) + + def __len__(self): + return len(self._attributes) + + def __repr__(self): + return "<RelativeDistinguishedName({0})>".format(list(self)) + + +class Name(object): + def __init__(self, attributes): + attributes = list(attributes) + if all(isinstance(x, NameAttribute) for x in attributes): + self._attributes = [ + RelativeDistinguishedName([x]) for x in attributes + ] + elif all(isinstance(x, RelativeDistinguishedName) for x in attributes): + self._attributes = attributes + else: + raise TypeError( + "attributes must be a list of NameAttribute" + " or a list of RelativeDistinguishedName" + ) + + def get_attributes_for_oid(self, oid): + return [i for i in self if i.oid == oid] + + @property + def rdns(self): + return self._attributes + + def public_bytes(self, backend): + return backend.x509_name_bytes(self) + + def __eq__(self, other): + if not isinstance(other, Name): + return NotImplemented + + return self._attributes == other._attributes + + def __ne__(self, other): + return not self == other + + def __hash__(self): + # TODO: this is relatively expensive, if this looks like a bottleneck + # for you, consider optimizing! + return hash(tuple(self._attributes)) + + def __iter__(self): + for rdn in self._attributes: + for ava in rdn: + yield ava + + def __len__(self): + return sum(len(rdn) for rdn in self._attributes) + + def __repr__(self): + return "<Name({0})>".format(list(self))
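A minimal sketch of the flat-list Name construction handled above (each NameAttribute becomes its own single-attribute RDN); the attribute values are placeholders:

    from cryptography import x509
    from cryptography.x509.oid import NameOID

    name = x509.Name([
        x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"),
        x509.NameAttribute(NameOID.COMMON_NAME, u"example.com"),
    ])
    [a.value for a in name.get_attributes_for_oid(NameOID.COMMON_NAME)]
    # [u'example.com']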
diff --git a/server/www/packages/packages-common/cryptography/x509/oid.py b/server/www/packages/packages-common/cryptography/x509/oid.py new file mode 100644 index 0000000..90003d7 --- /dev/null +++ b/server/www/packages/packages-common/cryptography/x509/oid.py @@ -0,0 +1,271 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.hazmat.primitives import hashes + + +class ObjectIdentifier(object): + def __init__(self, dotted_string): + self._dotted_string = dotted_string + + nodes = self._dotted_string.split(".") + intnodes = [] + + # There must be at least 2 nodes, the first node must be 0..2, and + # if the first node is less than 2, the second node must be in the + # range 0..39. All nodes must be integers. + for node in nodes: + try: + intnodes.append(int(node, 0)) + except ValueError: + raise ValueError( + "Malformed OID: %s (non-integer nodes)" % ( + self._dotted_string)) + + if len(nodes) < 2: + raise ValueError( + "Malformed OID: %s (insufficient number of nodes)" % ( + self._dotted_string)) + + if intnodes[0] > 2: + raise ValueError( + "Malformed OID: %s (first node outside valid range)" % ( + self._dotted_string)) + + if intnodes[0] < 2 and intnodes[1] >= 40: + raise ValueError( + "Malformed OID: %s (second node outside valid range)" % ( + self._dotted_string)) + + def __eq__(self, other): + if not isinstance(other, ObjectIdentifier): + return NotImplemented + + return self.dotted_string == other.dotted_string + + def __ne__(self, other): + return not self == other + + def __repr__(self): + return "<ObjectIdentifier(oid={0}, name={1})>".format( + self.dotted_string, + self._name + ) + + def __hash__(self): + return hash(self.dotted_string) + + @property + def _name(self): + return _OID_NAMES.get(self, "Unknown OID") + + dotted_string = utils.read_only_property("_dotted_string") + + +class ExtensionOID(object): + SUBJECT_DIRECTORY_ATTRIBUTES = ObjectIdentifier("2.5.29.9") + SUBJECT_KEY_IDENTIFIER = ObjectIdentifier("2.5.29.14") + KEY_USAGE = ObjectIdentifier("2.5.29.15") + SUBJECT_ALTERNATIVE_NAME = ObjectIdentifier("2.5.29.17") + ISSUER_ALTERNATIVE_NAME = ObjectIdentifier("2.5.29.18") + BASIC_CONSTRAINTS = ObjectIdentifier("2.5.29.19") + NAME_CONSTRAINTS = ObjectIdentifier("2.5.29.30") + CRL_DISTRIBUTION_POINTS = ObjectIdentifier("2.5.29.31") + CERTIFICATE_POLICIES = ObjectIdentifier("2.5.29.32") + POLICY_MAPPINGS = ObjectIdentifier("2.5.29.33") + AUTHORITY_KEY_IDENTIFIER = ObjectIdentifier("2.5.29.35") + POLICY_CONSTRAINTS = ObjectIdentifier("2.5.29.36") + EXTENDED_KEY_USAGE = ObjectIdentifier("2.5.29.37") + FRESHEST_CRL = ObjectIdentifier("2.5.29.46") + INHIBIT_ANY_POLICY = ObjectIdentifier("2.5.29.54") + AUTHORITY_INFORMATION_ACCESS = ObjectIdentifier("1.3.6.1.5.5.7.1.1") + SUBJECT_INFORMATION_ACCESS = ObjectIdentifier("1.3.6.1.5.5.7.1.11") + OCSP_NO_CHECK = ObjectIdentifier("1.3.6.1.5.5.7.48.1.5") + TLS_FEATURE = ObjectIdentifier("1.3.6.1.5.5.7.1.24") + CRL_NUMBER = ObjectIdentifier("2.5.29.20") + DELTA_CRL_INDICATOR = ObjectIdentifier("2.5.29.27") + PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS = ( + ObjectIdentifier("1.3.6.1.4.1.11129.2.4.2") + ) + + +class CRLEntryExtensionOID(object): + CERTIFICATE_ISSUER = ObjectIdentifier("2.5.29.29") + CRL_REASON = ObjectIdentifier("2.5.29.21") + INVALIDITY_DATE = ObjectIdentifier("2.5.29.24") + + +class NameOID(object): + COMMON_NAME = ObjectIdentifier("2.5.4.3") + COUNTRY_NAME = ObjectIdentifier("2.5.4.6") + LOCALITY_NAME = ObjectIdentifier("2.5.4.7") + STATE_OR_PROVINCE_NAME = ObjectIdentifier("2.5.4.8") + STREET_ADDRESS = ObjectIdentifier("2.5.4.9") + ORGANIZATION_NAME = ObjectIdentifier("2.5.4.10") + ORGANIZATIONAL_UNIT_NAME = ObjectIdentifier("2.5.4.11") + SERIAL_NUMBER = ObjectIdentifier("2.5.4.5") + SURNAME = ObjectIdentifier("2.5.4.4") + GIVEN_NAME = ObjectIdentifier("2.5.4.42")
+ TITLE = ObjectIdentifier("2.5.4.12") + GENERATION_QUALIFIER = ObjectIdentifier("2.5.4.44") + X500_UNIQUE_IDENTIFIER = ObjectIdentifier("2.5.4.45") + DN_QUALIFIER = ObjectIdentifier("2.5.4.46") + PSEUDONYM = ObjectIdentifier("2.5.4.65") + USER_ID = ObjectIdentifier("0.9.2342.19200300.100.1.1") + DOMAIN_COMPONENT = ObjectIdentifier("0.9.2342.19200300.100.1.25") + EMAIL_ADDRESS = ObjectIdentifier("1.2.840.113549.1.9.1") + JURISDICTION_COUNTRY_NAME = ObjectIdentifier("1.3.6.1.4.1.311.60.2.1.3") + JURISDICTION_LOCALITY_NAME = ObjectIdentifier("1.3.6.1.4.1.311.60.2.1.1") + JURISDICTION_STATE_OR_PROVINCE_NAME = ObjectIdentifier( + "1.3.6.1.4.1.311.60.2.1.2" + ) + BUSINESS_CATEGORY = ObjectIdentifier("2.5.4.15") + POSTAL_ADDRESS = ObjectIdentifier("2.5.4.16") + POSTAL_CODE = ObjectIdentifier("2.5.4.17") + + +class SignatureAlgorithmOID(object): + RSA_WITH_MD5 = ObjectIdentifier("1.2.840.113549.1.1.4") + RSA_WITH_SHA1 = ObjectIdentifier("1.2.840.113549.1.1.5") + # This is an alternate OID for RSA with SHA1 that is occasionally seen + _RSA_WITH_SHA1 = ObjectIdentifier("1.3.14.3.2.29") + RSA_WITH_SHA224 = ObjectIdentifier("1.2.840.113549.1.1.14") + RSA_WITH_SHA256 = ObjectIdentifier("1.2.840.113549.1.1.11") + RSA_WITH_SHA384 = ObjectIdentifier("1.2.840.113549.1.1.12") + RSA_WITH_SHA512 = ObjectIdentifier("1.2.840.113549.1.1.13") + RSASSA_PSS = ObjectIdentifier("1.2.840.113549.1.1.10") + ECDSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10045.4.1") + ECDSA_WITH_SHA224 = ObjectIdentifier("1.2.840.10045.4.3.1") + ECDSA_WITH_SHA256 = ObjectIdentifier("1.2.840.10045.4.3.2") + ECDSA_WITH_SHA384 = ObjectIdentifier("1.2.840.10045.4.3.3") + ECDSA_WITH_SHA512 = ObjectIdentifier("1.2.840.10045.4.3.4") + DSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10040.4.3") + DSA_WITH_SHA224 = ObjectIdentifier("2.16.840.1.101.3.4.3.1") + DSA_WITH_SHA256 = ObjectIdentifier("2.16.840.1.101.3.4.3.2") + + +_SIG_OIDS_TO_HASH = { + SignatureAlgorithmOID.RSA_WITH_MD5: hashes.MD5(), + SignatureAlgorithmOID.RSA_WITH_SHA1: hashes.SHA1(), + SignatureAlgorithmOID._RSA_WITH_SHA1: hashes.SHA1(), + SignatureAlgorithmOID.RSA_WITH_SHA224: hashes.SHA224(), + SignatureAlgorithmOID.RSA_WITH_SHA256: hashes.SHA256(), + SignatureAlgorithmOID.RSA_WITH_SHA384: hashes.SHA384(), + SignatureAlgorithmOID.RSA_WITH_SHA512: hashes.SHA512(), + SignatureAlgorithmOID.ECDSA_WITH_SHA1: hashes.SHA1(), + SignatureAlgorithmOID.ECDSA_WITH_SHA224: hashes.SHA224(), + SignatureAlgorithmOID.ECDSA_WITH_SHA256: hashes.SHA256(), + SignatureAlgorithmOID.ECDSA_WITH_SHA384: hashes.SHA384(), + SignatureAlgorithmOID.ECDSA_WITH_SHA512: hashes.SHA512(), + SignatureAlgorithmOID.DSA_WITH_SHA1: hashes.SHA1(), + SignatureAlgorithmOID.DSA_WITH_SHA224: hashes.SHA224(), + SignatureAlgorithmOID.DSA_WITH_SHA256: hashes.SHA256() +} + + +class ExtendedKeyUsageOID(object): + SERVER_AUTH = ObjectIdentifier("1.3.6.1.5.5.7.3.1") + CLIENT_AUTH = ObjectIdentifier("1.3.6.1.5.5.7.3.2") + CODE_SIGNING = ObjectIdentifier("1.3.6.1.5.5.7.3.3") + EMAIL_PROTECTION = ObjectIdentifier("1.3.6.1.5.5.7.3.4") + TIME_STAMPING = ObjectIdentifier("1.3.6.1.5.5.7.3.8") + OCSP_SIGNING = ObjectIdentifier("1.3.6.1.5.5.7.3.9") + ANY_EXTENDED_KEY_USAGE = ObjectIdentifier("2.5.29.37.0") + + +class AuthorityInformationAccessOID(object): + CA_ISSUERS = ObjectIdentifier("1.3.6.1.5.5.7.48.2") + OCSP = ObjectIdentifier("1.3.6.1.5.5.7.48.1") + + +class CertificatePoliciesOID(object): + CPS_QUALIFIER = ObjectIdentifier("1.3.6.1.5.5.7.2.1") + CPS_USER_NOTICE = ObjectIdentifier("1.3.6.1.5.5.7.2.2") + ANY_POLICY = 
ObjectIdentifier("2.5.29.32.0") + + +_OID_NAMES = { + NameOID.COMMON_NAME: "commonName", + NameOID.COUNTRY_NAME: "countryName", + NameOID.LOCALITY_NAME: "localityName", + NameOID.STATE_OR_PROVINCE_NAME: "stateOrProvinceName", + NameOID.STREET_ADDRESS: "streetAddress", + NameOID.ORGANIZATION_NAME: "organizationName", + NameOID.ORGANIZATIONAL_UNIT_NAME: "organizationalUnitName", + NameOID.SERIAL_NUMBER: "serialNumber", + NameOID.SURNAME: "surname", + NameOID.GIVEN_NAME: "givenName", + NameOID.TITLE: "title", + NameOID.GENERATION_QUALIFIER: "generationQualifier", + NameOID.X500_UNIQUE_IDENTIFIER: "x500UniqueIdentifier", + NameOID.DN_QUALIFIER: "dnQualifier", + NameOID.PSEUDONYM: "pseudonym", + NameOID.USER_ID: "userID", + NameOID.DOMAIN_COMPONENT: "domainComponent", + NameOID.EMAIL_ADDRESS: "emailAddress", + NameOID.JURISDICTION_COUNTRY_NAME: "jurisdictionCountryName", + NameOID.JURISDICTION_LOCALITY_NAME: "jurisdictionLocalityName", + NameOID.JURISDICTION_STATE_OR_PROVINCE_NAME: ( + "jurisdictionStateOrProvinceName" + ), + NameOID.BUSINESS_CATEGORY: "businessCategory", + NameOID.POSTAL_ADDRESS: "postalAddress", + NameOID.POSTAL_CODE: "postalCode", + + SignatureAlgorithmOID.RSA_WITH_MD5: "md5WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA1: "sha1WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA224: "sha224WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA256: "sha256WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA384: "sha384WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA512: "sha512WithRSAEncryption", + SignatureAlgorithmOID.RSASSA_PSS: "RSASSA-PSS", + SignatureAlgorithmOID.ECDSA_WITH_SHA1: "ecdsa-with-SHA1", + SignatureAlgorithmOID.ECDSA_WITH_SHA224: "ecdsa-with-SHA224", + SignatureAlgorithmOID.ECDSA_WITH_SHA256: "ecdsa-with-SHA256", + SignatureAlgorithmOID.ECDSA_WITH_SHA384: "ecdsa-with-SHA384", + SignatureAlgorithmOID.ECDSA_WITH_SHA512: "ecdsa-with-SHA512", + SignatureAlgorithmOID.DSA_WITH_SHA1: "dsa-with-sha1", + SignatureAlgorithmOID.DSA_WITH_SHA224: "dsa-with-sha224", + SignatureAlgorithmOID.DSA_WITH_SHA256: "dsa-with-sha256", + ExtendedKeyUsageOID.SERVER_AUTH: "serverAuth", + ExtendedKeyUsageOID.CLIENT_AUTH: "clientAuth", + ExtendedKeyUsageOID.CODE_SIGNING: "codeSigning", + ExtendedKeyUsageOID.EMAIL_PROTECTION: "emailProtection", + ExtendedKeyUsageOID.TIME_STAMPING: "timeStamping", + ExtendedKeyUsageOID.OCSP_SIGNING: "OCSPSigning", + ExtensionOID.SUBJECT_DIRECTORY_ATTRIBUTES: "subjectDirectoryAttributes", + ExtensionOID.SUBJECT_KEY_IDENTIFIER: "subjectKeyIdentifier", + ExtensionOID.KEY_USAGE: "keyUsage", + ExtensionOID.SUBJECT_ALTERNATIVE_NAME: "subjectAltName", + ExtensionOID.ISSUER_ALTERNATIVE_NAME: "issuerAltName", + ExtensionOID.BASIC_CONSTRAINTS: "basicConstraints", + ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS: ( + "signedCertificateTimestampList" + ), + CRLEntryExtensionOID.CRL_REASON: "cRLReason", + CRLEntryExtensionOID.INVALIDITY_DATE: "invalidityDate", + CRLEntryExtensionOID.CERTIFICATE_ISSUER: "certificateIssuer", + ExtensionOID.NAME_CONSTRAINTS: "nameConstraints", + ExtensionOID.CRL_DISTRIBUTION_POINTS: "cRLDistributionPoints", + ExtensionOID.CERTIFICATE_POLICIES: "certificatePolicies", + ExtensionOID.POLICY_MAPPINGS: "policyMappings", + ExtensionOID.AUTHORITY_KEY_IDENTIFIER: "authorityKeyIdentifier", + ExtensionOID.POLICY_CONSTRAINTS: "policyConstraints", + ExtensionOID.EXTENDED_KEY_USAGE: "extendedKeyUsage", + ExtensionOID.FRESHEST_CRL: "freshestCRL", + ExtensionOID.INHIBIT_ANY_POLICY: "inhibitAnyPolicy", + 
ExtensionOID.AUTHORITY_INFORMATION_ACCESS: "authorityInfoAccess", + ExtensionOID.SUBJECT_INFORMATION_ACCESS: "subjectInfoAccess", + ExtensionOID.OCSP_NO_CHECK: "OCSPNoCheck", + ExtensionOID.CRL_NUMBER: "cRLNumber", + ExtensionOID.DELTA_CRL_INDICATOR: "deltaCRLIndicator", + ExtensionOID.TLS_FEATURE: "TLSFeature", + AuthorityInformationAccessOID.OCSP: "OCSP", + AuthorityInformationAccessOID.CA_ISSUERS: "caIssuers", + CertificatePoliciesOID.CPS_QUALIFIER: "id-qt-cps", + CertificatePoliciesOID.CPS_USER_NOTICE: "id-qt-unotice", +} diff --git a/server/www/packages/packages-common/mako/__init__.py b/server/www/packages/packages-common/mako/__init__.py index eaa24dc..01c1739 100644 --- a/server/www/packages/packages-common/mako/__init__.py +++ b/server/www/packages/packages-common/mako/__init__.py @@ -5,4 +5,4 @@ # the MIT License: http://www.opensource.org/licenses/mit-license.php -__version__ = '1.0.6' +__version__ = '1.0.7' diff --git a/server/www/packages/packages-common/mako/_ast_util.py b/server/www/packages/packages-common/mako/_ast_util.py index 8d19b0d..c410287 100644 --- a/server/www/packages/packages-common/mako/_ast_util.py +++ b/server/www/packages/packages-common/mako/_ast_util.py @@ -187,7 +187,7 @@ def iter_fields(node): def get_fields(node): - """Like `iter_fiels` but returns a dict.""" + """Like `iter_fields` but returns a dict.""" return dict(iter_fields(node)) diff --git a/server/www/packages/packages-common/mako/cache.py b/server/www/packages/packages-common/mako/cache.py index 94f3870..1af17dd 100644 --- a/server/www/packages/packages-common/mako/cache.py +++ b/server/www/packages/packages-common/mako/cache.py @@ -95,7 +95,7 @@ class Cache(object): **self._get_cache_kw(kw, context)) def set(self, key, value, **kw): - """Place a value in the cache. + r"""Place a value in the cache. :param key: the value's key. :param value: the value. @@ -113,7 +113,7 @@ class Cache(object): """ def get(self, key, **kw): - """Retrieve a value from the cache. + r"""Retrieve a value from the cache. :param key: the value's key. :param \**kw: cache configuration arguments. The @@ -125,7 +125,7 @@ class Cache(object): return self.impl.get(key, **self._get_cache_kw(kw, None)) def invalidate(self, key, **kw): - """Invalidate a value in the cache. + r"""Invalidate a value in the cache. :param key: the value's key. :param \**kw: cache configuration arguments. The @@ -194,7 +194,7 @@ class CacheImpl(object): """ def get_or_create(self, key, creation_function, **kw): - """Retrieve a value from the cache, using the given creation function + r"""Retrieve a value from the cache, using the given creation function to generate a new value. This function *must* return a value, either from @@ -212,7 +212,7 @@ class CacheImpl(object): raise NotImplementedError() def set(self, key, value, **kw): - """Place a value in the cache. + r"""Place a value in the cache. :param key: the value's key. :param value: the value. @@ -222,7 +222,7 @@ class CacheImpl(object): raise NotImplementedError() def get(self, key, **kw): - """Retrieve a value from the cache. + r"""Retrieve a value from the cache. :param key: the value's key. :param \**kw: cache configuration arguments. @@ -231,7 +231,7 @@ class CacheImpl(object): raise NotImplementedError() def invalidate(self, key, **kw): - """Invalidate a value in the cache. + r"""Invalidate a value in the cache. :param key: the value's key. :param \**kw: cache configuration arguments. 
diff --git a/server/www/packages/packages-common/mako/cmd.py b/server/www/packages/packages-common/mako/cmd.py index dd1f833..8db1346 100644 --- a/server/www/packages/packages-common/mako/cmd.py +++ b/server/www/packages/packages-common/mako/cmd.py @@ -58,7 +58,7 @@ def cmdline(argv=None): kw = dict([varsplit(var) for var in options.var]) try: - print(template.render(**kw)) + sys.stdout.write(template.render(**kw)) except: _exit() diff --git a/server/www/packages/packages-common/mako/ext/extract.py b/server/www/packages/packages-common/mako/ext/extract.py index 8dd2e96..d777ea8 100644 --- a/server/www/packages/packages-common/mako/ext/extract.py +++ b/server/www/packages/packages-common/mako/ext/extract.py @@ -68,7 +68,7 @@ class MessageExtractor(object): else: continue - # Comments don't apply unless they immediately preceed the message + # Comments don't apply unless they immediately precede the message if translator_comments and \ translator_comments[-1][0] < node.lineno - 1: translator_comments = [] diff --git a/server/www/packages/packages-common/mako/parsetree.py b/server/www/packages/packages-common/mako/parsetree.py index 879882e..e129916 100644 --- a/server/www/packages/packages-common/mako/parsetree.py +++ b/server/www/packages/packages-common/mako/parsetree.py @@ -258,7 +258,7 @@ class Tag(compat.with_metaclass(_TagMeta, Node)): def __init__(self, keyword, attributes, expressions, nonexpressions, required, **kwargs): - """construct a new Tag instance. + r"""construct a new Tag instance. this constructor not called directly, and is only called by subclasses. diff --git a/server/www/packages/packages-common/mako/template.py b/server/www/packages/packages-common/mako/template.py index c3e0c25..329632c 100644 --- a/server/www/packages/packages-common/mako/template.py +++ b/server/www/packages/packages-common/mako/template.py @@ -21,7 +21,7 @@ import weakref class Template(object): - """Represents a compiled template. + r"""Represents a compiled template. :class:`.Template` includes a reference to the original template source (via the :attr:`.source` attribute) diff --git a/server/www/packages/packages-common/pymysql/__init__.py b/server/www/packages/packages-common/pymysql/__init__.py index 43fb9a0..b79b4b8 100644 --- a/server/www/packages/packages-common/pymysql/__init__.py +++ b/server/www/packages/packages-common/pymysql/__init__.py @@ -35,7 +35,11 @@ from .times import ( DateFromTicks, TimeFromTicks, TimestampFromTicks) -VERSION = (0, 7, 11, None) +VERSION = (0, 9, 2, None) +if VERSION[3] is not None: + VERSION_STRING = "%d.%d.%d_%s" % VERSION +else: + VERSION_STRING = "%d.%d.%d" % VERSION[:3] threadsafety = 1 apilevel = "2.0" paramstyle = "pyformat" @@ -96,12 +100,15 @@ del _orig_conn def get_client_info(): # for MySQLdb compatibility - return '.'.join(map(str, VERSION)) + version = VERSION + if VERSION[3] is None: + version = VERSION[:3] + return '.'.join(map(str, version)) connect = Connection = Connect # we include a doctored version_info here for MySQLdb compatibility -version_info = (1,2,6,"final",0) +version_info = (1, 3, 12, "final", 0) NULL = "NULL" @@ -113,7 +120,7 @@ def thread_safe(): def install_as_MySQLdb(): """ After this function is called, any application that imports MySQLdb or - _mysql will unwittingly actually use + _mysql will unwittingly actually use pymysql. 
""" sys.modules["MySQLdb"] = sys.modules["_mysql"] = sys.modules["pymysql"] diff --git a/server/www/packages/packages-common/pymysql/connections.py b/server/www/packages/packages-common/pymysql/connections.py index 31dd85a..1e580d2 100644 --- a/server/www/packages/packages-common/pymysql/connections.py +++ b/server/www/packages/packages-common/pymysql/connections.py @@ -6,8 +6,6 @@ from __future__ import print_function from ._compat import PY2, range_type, text_type, str_type, JYTHON, IRONPYTHON import errno -from functools import partial -import hashlib import io import os import socket @@ -16,13 +14,19 @@ import sys import traceback import warnings -from .charset import MBLENGTH, charset_by_name, charset_by_id +from . import _auth + +from .charset import charset_by_name, charset_by_id from .constants import CLIENT, COMMAND, CR, FIELD_TYPE, SERVER_STATUS -from .converters import escape_item, escape_string, through, conversions as _conv +from . import converters from .cursors import Cursor from .optionfile import Parser +from .protocol import ( + dump_packet, MysqlPacket, FieldDescriptorPacket, OKPacketWrapper, + EOFPacketWrapper, LoadLocalPacketWrapper +) from .util import byte2int, int2byte -from . import err +from . import err, VERSION_STRING try: import ssl @@ -39,51 +43,39 @@ except (ImportError, KeyError): # KeyError occurs when there's no entry in OS database for a current user. DEFAULT_USER = None - DEBUG = False _py_version = sys.version_info[:2] +if PY2: + pass +elif _py_version < (3, 6): + # See http://bugs.python.org/issue24870 + _surrogateescape_table = [chr(i) if i < 0x80 else chr(i + 0xdc00) for i in range(256)] + + def _fast_surrogateescape(s): + return s.decode('latin1').translate(_surrogateescape_table) +else: + def _fast_surrogateescape(s): + return s.decode('ascii', 'surrogateescape') # socket.makefile() in Python 2 is not usable because very inefficient and # bad behavior about timeout. # XXX: ._socketio doesn't work under IronPython. -if _py_version == (2, 7) and not IRONPYTHON: +if PY2 and not IRONPYTHON: # read method of file-like returned by sock.makefile() is very slow. # So we copy io-based one from Python 3. from ._socketio import SocketIO def _makefile(sock, mode): return io.BufferedReader(SocketIO(sock, mode)) -elif _py_version == (2, 6): - # Python 2.6 doesn't have fast io module. - # So we make original one. - class SockFile(object): - def __init__(self, sock): - self._sock = sock - - def read(self, n): - read = self._sock.recv(n) - if len(read) == n: - return read - while True: - data = self._sock.recv(n-len(read)) - if not data: - return read - read += data - if len(read) == n: - return read - - def _makefile(sock, mode): - assert mode == 'rb' - return SockFile(sock) else: # socket.makefile in Python 3 is nice. def _makefile(sock, mode): return sock.makefile(mode) -TEXT_TYPES = set([ +TEXT_TYPES = { FIELD_TYPE.BIT, FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB, @@ -92,122 +84,19 @@ TEXT_TYPES = set([ FIELD_TYPE.TINY_BLOB, FIELD_TYPE.VAR_STRING, FIELD_TYPE.VARCHAR, - FIELD_TYPE.GEOMETRY]) + FIELD_TYPE.GEOMETRY, +} -sha_new = partial(hashlib.new, 'sha1') -NULL_COLUMN = 251 -UNSIGNED_CHAR_COLUMN = 251 -UNSIGNED_SHORT_COLUMN = 252 -UNSIGNED_INT24_COLUMN = 253 -UNSIGNED_INT64_COLUMN = 254 - -DEFAULT_CHARSET = 'latin1' +DEFAULT_CHARSET = 'utf8mb4' # TODO: change to utf8mb4 MAX_PACKET_LEN = 2**24-1 -def dump_packet(data): # pragma: no cover - def is_ascii(data): - if 65 <= byte2int(data) <= 122: - if isinstance(data, int): - return chr(data) - return data - return '.' 
- - try: - print("packet length:", len(data)) - for i in range(1, 6): - f = sys._getframe(i) - print("call[%d]: %s (line %d)" % (i, f.f_code.co_name, f.f_lineno)) - print("-" * 66) - except ValueError: - pass - dump_data = [data[i:i+16] for i in range_type(0, min(len(data), 256), 16)] - for d in dump_data: - print(' '.join(map(lambda x: "{:02X}".format(byte2int(x)), d)) + - ' ' * (16 - len(d)) + ' ' * 2 + - ''.join(map(lambda x: "{}".format(is_ascii(x)), d))) - print("-" * 66) - print() - - -def _scramble(password, message): - if not password: - return b'' - if DEBUG: print('password=' + str(password)) - stage1 = sha_new(password).digest() - stage2 = sha_new(stage1).digest() - s = sha_new() - s.update(message) - s.update(stage2) - result = s.digest() - return _my_crypt(result, stage1) - - -def _my_crypt(message1, message2): - length = len(message1) - result = b'' - for i in range_type(length): - x = (struct.unpack('B', message1[i:i+1])[0] ^ - struct.unpack('B', message2[i:i+1])[0]) - result += struct.pack('B', x) - return result - -# old_passwords support ported from libmysql/password.c -SCRAMBLE_LENGTH_323 = 8 - - -class RandStruct_323(object): - def __init__(self, seed1, seed2): - self.max_value = 0x3FFFFFFF - self.seed1 = seed1 % self.max_value - self.seed2 = seed2 % self.max_value - - def my_rnd(self): - self.seed1 = (self.seed1 * 3 + self.seed2) % self.max_value - self.seed2 = (self.seed1 + self.seed2 + 33) % self.max_value - return float(self.seed1) / float(self.max_value) - - -def _scramble_323(password, message): - hash_pass = _hash_password_323(password) - hash_message = _hash_password_323(message[:SCRAMBLE_LENGTH_323]) - hash_pass_n = struct.unpack(">LL", hash_pass) - hash_message_n = struct.unpack(">LL", hash_message) - - rand_st = RandStruct_323(hash_pass_n[0] ^ hash_message_n[0], - hash_pass_n[1] ^ hash_message_n[1]) - outbuf = io.BytesIO() - for _ in range_type(min(SCRAMBLE_LENGTH_323, len(message))): - outbuf.write(int2byte(int(rand_st.my_rnd() * 31) + 64)) - extra = int2byte(int(rand_st.my_rnd() * 31)) - out = outbuf.getvalue() - outbuf = io.BytesIO() - for c in out: - outbuf.write(int2byte(byte2int(c) ^ byte2int(extra))) - return outbuf.getvalue() - - -def _hash_password_323(password): - nr = 1345345333 - add = 7 - nr2 = 0x12345671 - - # x in py3 is numbers, p27 is chars - for c in [byte2int(x) for x in password if x not in (' ', '\t', 32, 9)]: - nr ^= (((nr & 63) + add) * c) + (nr << 8) & 0xFFFFFFFF - nr2 = (nr2 + ((nr2 << 8) ^ nr)) & 0xFFFFFFFF - add = (add + c) & 0xFFFFFFFF - - r1 = nr & ((1 << 31) - 1) # kill sign bits - r2 = nr2 & ((1 << 31) - 1) - return struct.pack(">LL", r1, r2) - - def pack_int24(n): return struct.pack(' len(self._data): - raise Exception('Invalid advance amount (%s) for cursor. ' - 'Position=%s' % (length, new_position)) - self._position = new_position - - def rewind(self, position=0): - """Set the position of the data buffer cursor to 'position'.""" - if position < 0 or position > len(self._data): - raise Exception("Invalid position to rewind cursor to: %s." % position) - self._position = position - - def get_bytes(self, position, length=1): - """Get 'length' bytes starting at 'position'. - - Position is start of payload (first four packet header bytes are not - included) starting at index '0'. - - No error checking is done. If requesting outside end of buffer - an empty string (or string shorter than 'length') may be returned! 
- """ - return self._data[position:(position+length)] - - if PY2: - def read_uint8(self): - result = ord(self._data[self._position]) - self._position += 1 - return result - else: - def read_uint8(self): - result = self._data[self._position] - self._position += 1 - return result - - def read_uint16(self): - result = struct.unpack_from('= 7 - - def is_eof_packet(self): - # http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-EOF_Packet - # Caution: \xFE may be LengthEncodedInteger. - # If \xFE is LengthEncodedInteger header, 8bytes followed. - return self._data[0:1] == b'\xfe' and len(self._data) < 9 - - def is_auth_switch_request(self): - # http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest - return self._data[0:1] == b'\xfe' - - def is_resultset_packet(self): - field_count = ord(self._data[0:1]) - return 1 <= field_count <= 250 - - def is_load_local_packet(self): - return self._data[0:1] == b'\xfb' - - def is_error_packet(self): - return self._data[0:1] == b'\xff' - - def check_error(self): - if self.is_error_packet(): - self.rewind() - self.advance(1) # field_count == error (we already know that) - errno = self.read_uint16() - if DEBUG: print("errno =", errno) - err.raise_mysql_exception(self._data) - - def dump(self): - dump_packet(self._data) - - -class FieldDescriptorPacket(MysqlPacket): - """A MysqlPacket that represents a specific column's metadata in the result. - - Parsing is automatically done and the results are exported via public - attributes on the class such as: db, table_name, name, length, type_code. - """ - - def __init__(self, data, encoding): - MysqlPacket.__init__(self, data, encoding) - self._parse_field_descriptor(encoding) - - def _parse_field_descriptor(self, encoding): - """Parse the 'Field Descriptor' (Metadata) packet. - - This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0). - """ - self.catalog = self.read_length_coded_string() - self.db = self.read_length_coded_string() - self.table_name = self.read_length_coded_string().decode(encoding) - self.org_table = self.read_length_coded_string().decode(encoding) - self.name = self.read_length_coded_string().decode(encoding) - self.org_name = self.read_length_coded_string().decode(encoding) - self.charsetnr, self.length, self.type_code, self.flags, self.scale = ( - self.read_struct('`_ in the + specification. """ _sock = None _auth_plugin_name = '' _closed = False + _secure = False def __init__(self, host=None, user=None, password="", database=None, port=0, unix_socket=None, @@ -532,63 +187,12 @@ class Connection(object): read_default_file=None, conv=None, use_unicode=None, client_flag=0, cursorclass=Cursor, init_command=None, connect_timeout=10, ssl=None, read_default_group=None, - compress=None, named_pipe=None, no_delay=None, + compress=None, named_pipe=None, autocommit=False, db=None, passwd=None, local_infile=False, max_allowed_packet=16*1024*1024, defer_connect=False, - auth_plugin_map={}, read_timeout=None, write_timeout=None, - bind_address=None): - """ - Establish a connection to the MySQL database. Accepts several - arguments: - - host: Host where the database server is located - user: Username to log in as - password: Password to use. - database: Database to use, None to not use a particular one. - port: MySQL port to use, default is usually OK. (default: 3306) - bind_address: When the client has multiple network interfaces, specify - the interface from which to connect to the host. 
Argument can be - a hostname or an IP address. - unix_socket: Optionally, you can use a unix socket rather than TCP/IP. - charset: Charset you want to use. - sql_mode: Default SQL_MODE to use. - read_default_file: - Specifies my.cnf file to read these parameters from under the [client] section. - conv: - Conversion dictionary to use instead of the default one. - This is used to provide custom marshalling and unmarshaling of types. - See converters. - use_unicode: - Whether or not to default to unicode strings. - This option defaults to true for Py3k. - client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT. - cursorclass: Custom cursor class to use. - init_command: Initial SQL statement to run when connection is established. - connect_timeout: Timeout before throwing an exception when connecting. - (default: 10, min: 1, max: 31536000) - ssl: - A dict of arguments similar to mysql_ssl_set()'s parameters. - For now the capath and cipher arguments are not supported. - read_default_group: Group to read from in the configuration file. - compress; Not supported - named_pipe: Not supported - autocommit: Autocommit mode. None means use server default. (default: False) - local_infile: Boolean to enable the use of LOAD DATA LOCAL command. (default: False) - max_allowed_packet: Max size of packet sent to server in bytes. (default: 16MB) - Only used to limit size of "LOAD LOCAL INFILE" data packet smaller than default (16KB). - defer_connect: Don't explicitly connect on contruction - wait for connect call. - (default: False) - auth_plugin_map: A dict of plugin names to a class that processes that plugin. - The class will take the Connection object as the argument to the constructor. - The class needs an authenticate method taking an authentication packet as - an argument. For the dialog plugin, a prompt(echo, prompt) method can be used - (if no authenticate method) for returning a string from the user. (experimental) - db: Alias for database. (for compatibility to MySQLdb) - passwd: Alias for password. (for compatibility to MySQLdb) - """ - if no_delay is not None: - warnings.warn("no_delay option is deprecated", DeprecationWarning) - + auth_plugin_map=None, read_timeout=None, write_timeout=None, + bind_address=None, binary_prefix=False, program_name=None, + server_public_key=None): if use_unicode is None and sys.version_info[0] > 2: use_unicode = True @@ -652,7 +256,9 @@ class Connection(object): self.host = host or "localhost" self.port = port or 3306 self.user = user or DEFAULT_USER - self.password = password or "" + self.password = password or b"" + if isinstance(self.password, text_type): + self.password = self.password.encode('latin1') self.db = database self.unix_socket = unix_socket self.bind_address = bind_address @@ -680,6 +286,7 @@ class Connection(object): client_flag |= CLIENT.CAPABILITIES if self.db: client_flag |= CLIENT.CONNECT_WITH_DB + self.client_flag = client_flag self.cursorclass = cursorclass @@ -692,14 +299,28 @@ class Connection(object): self.autocommit_mode = autocommit if conv is None: - conv = _conv + conv = converters.conversions + # Need for MySQLdb compatibility. 
self.encoders = dict([(k, v) for (k, v) in conv.items() if type(k) is not int]) self.decoders = dict([(k, v) for (k, v) in conv.items() if type(k) is int]) self.sql_mode = sql_mode self.init_command = init_command self.max_allowed_packet = max_allowed_packet - self._auth_plugin_map = auth_plugin_map + self._auth_plugin_map = auth_plugin_map or {} + self._binary_prefix = binary_prefix + self.server_public_key = server_public_key + + self._connect_attrs = { + '_client_name': 'pymysql', + '_pid': str(os.getpid()), + '_client_version': VERSION_STRING, + } + if program_name: + self._connect_attrs["program_name"] = program_name + elif sys.argv: + self._connect_attrs["program_name"] = sys.argv[0] + if defer_connect: self._sock = None else: @@ -723,7 +344,14 @@ class Connection(object): return ctx def close(self): - """Send the quit message and close the socket""" + """ + Send the quit message and close the socket. + + See `Connection.close() `_ + in the specification. + + :raise Error: If the connection is already closed. + """ if self._closed: raise err.Error("Already closed") self._closed = True @@ -739,6 +367,7 @@ class Connection(object): @property def open(self): + """Return True if the connection is open""" return self._sock is not None def _force_close(self): @@ -746,7 +375,7 @@ class Connection(object): if self._sock: try: self._sock.close() - except: + except: # noqa pass self._sock = None self._rfile = None @@ -783,39 +412,58 @@ class Connection(object): self._read_ok_packet() def commit(self): - """Commit changes to stable storage""" + """ + Commit changes to stable storage. + + See `Connection.commit() `_ + in the specification. + """ self._execute_command(COMMAND.COM_QUERY, "COMMIT") self._read_ok_packet() def rollback(self): - """Roll back the current transaction""" + """ + Roll back the current transaction. + + See `Connection.rollback() `_ + in the specification. + """ self._execute_command(COMMAND.COM_QUERY, "ROLLBACK") self._read_ok_packet() def show_warnings(self): - """SHOW WARNINGS""" + """Send the "SHOW WARNINGS" SQL command.""" self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS") result = MySQLResult(self) result.read() return result.rows def select_db(self, db): - """Set current db""" + """ + Set current db. + + :param db: The name of the db. + """ self._execute_command(COMMAND.COM_INIT_DB, db) self._read_ok_packet() def escape(self, obj, mapping=None): """Escape whatever value you pass to it. - + Non-standard, for internal use; do not use this in your applications. """ if isinstance(obj, str_type): return "'" + self.escape_string(obj) + "'" - return escape_item(obj, self.charset, mapping=mapping) + if isinstance(obj, (bytes, bytearray)): + ret = self._quote_bytes(obj) + if self._binary_prefix: + ret = "_binary" + ret + return ret + return converters.escape_item(obj, self.charset, mapping=mapping) def literal(self, obj): """Alias for escape() - + Non-standard, for internal use; do not use this in your applications. 
""" return self.escape(obj, self.encoders) @@ -824,10 +472,22 @@ class Connection(object): if (self.server_status & SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES): return s.replace("'", "''") - return escape_string(s) + return converters.escape_string(s) + + def _quote_bytes(self, s): + if (self.server_status & + SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES): + return "'%s'" % (_fast_surrogateescape(s.replace(b"'", b"''")),) + return converters.escape_bytes(s) def cursor(self, cursor=None): - """Create a new cursor to execute queries with""" + """ + Create a new cursor to execute queries with. + + :param cursor: The type of cursor to create; one of :py:class:`Cursor`, + :py:class:`SSCursor`, :py:class:`DictCursor`, or :py:class:`SSDictCursor`. + None means use Cursor. + """ if cursor: return cursor(self) return self.cursorclass(self) @@ -869,7 +529,12 @@ class Connection(object): return self._read_ok_packet() def ping(self, reconnect=True): - """Check if the server is alive""" + """ + Check if the server is alive. + + :param reconnect: If the connection is closed, reconnect. + :raise Error: If the connection is closed and reconnect=False. + """ if self._sock is None: if reconnect: self.connect() @@ -878,11 +543,11 @@ class Connection(object): raise err.Error("Already closed") try: self._execute_command(COMMAND.COM_PING, "") - return self._read_ok_packet() + self._read_ok_packet() except Exception: if reconnect: self.connect() - return self.ping(False) + self.ping(False) else: raise @@ -899,11 +564,12 @@ class Connection(object): self._closed = False try: if sock is None: - if self.unix_socket and self.host in ('localhost', '127.0.0.1'): + if self.unix_socket: sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.settimeout(self.connect_timeout) sock.connect(self.unix_socket) self.host_info = "Localhost via UNIX socket" + self._secure = True if DEBUG: print('connected using unix_socket') else: kwargs = {} @@ -948,7 +614,7 @@ class Connection(object): if sock is not None: try: sock.close() - except: + except: # noqa pass if isinstance(e, (OSError, IOError, socket.error)): @@ -981,11 +647,14 @@ class Connection(object): def _read_packet(self, packet_type=MysqlPacket): """Read an entire "mysql packet" in its entirety from the network and return a MysqlPacket type that represents the results. + + :raise OperationalError: If the connection to the MySQL server is lost. + :raise InternalError: If the packet sequence number is wrong. """ buff = b'' while True: packet_header = self._read_bytes(4) - if DEBUG: dump_packet(packet_header) + #if DEBUG: dump_packet(packet_header) btrl, btrh, packet_number = struct.unpack('= i + 6: lang, stat, cap_h, salt_len = struct.unpack('`_ in + the specification. """ #: Max statement size which :meth:`executemany` generates. @@ -32,10 +38,6 @@ class Cursor(object): _defer_warnings = False def __init__(self, connection): - """ - Do not create an instance of a Cursor yourself. Call - connections.Connection.cursor(). - """ self.connection = connection self.description = None self.rownumber = 0 @@ -95,6 +97,8 @@ class Cursor(object): return None if not current_result.has_next: return None + self._result = None + self._clear_result() conn.next_result(unbuffered=unbuffered) self._do_get_result() return True @@ -260,9 +264,10 @@ class Cursor(object): disconnected. 
""" conn = self._get_db() - for index, arg in enumerate(args): - q = "SET @_%s_%d=%s" % (procname, index, conn.escape(arg)) - self._query(q) + if args: + fmt = '@_{0}_%d=%s'.format(procname) + self._query('SET %s' % ','.join(fmt % (index, conn.escape(arg)) + for index, arg in enumerate(args))) self.nextset() q = "CALL %s(%s)" % (procname, @@ -319,14 +324,23 @@ class Cursor(object): def _query(self, q): conn = self._get_db() self._last_executed = q + self._clear_result() conn.query(q) self._do_get_result() return self.rowcount + def _clear_result(self): + self.rownumber = 0 + self._result = None + + self.rowcount = 0 + self.description = None + self.lastrowid = None + self._rows = None + def _do_get_result(self): conn = self._get_db() - self.rownumber = 0 self._result = result = conn._result self.rowcount = result.affected_rows @@ -432,9 +446,12 @@ class SSCursor(Cursor): finally: self.connection = None + __del__ = close + def _query(self, q): conn = self._get_db() self._last_executed = q + self._clear_result() conn.query(q, unbuffered=True) self._do_get_result() return self.rowcount diff --git a/server/www/packages/packages-common/pymysql/err.py b/server/www/packages/packages-common/pymysql/err.py index 2486263..fbc6055 100644 --- a/server/www/packages/packages-common/pymysql/err.py +++ b/server/www/packages/packages-common/pymysql/err.py @@ -78,7 +78,9 @@ _map_error(ProgrammingError, ER.DB_CREATE_EXISTS, ER.SYNTAX_ERROR, ER.PARSE_ERROR, ER.NO_SUCH_TABLE, ER.WRONG_DB_NAME, ER.WRONG_TABLE_NAME, ER.FIELD_SPECIFIED_TWICE, ER.INVALID_GROUP_FUNC_USE, ER.UNSUPPORTED_EXTENSION, - ER.TABLE_MUST_HAVE_COLUMNS, ER.CANT_DO_THIS_DURING_AN_TRANSACTION) + ER.TABLE_MUST_HAVE_COLUMNS, ER.CANT_DO_THIS_DURING_AN_TRANSACTION, + ER.WRONG_DB_NAME, ER.WRONG_COLUMN_NAME, + ) _map_error(DataError, ER.WARN_DATA_TRUNCATED, ER.WARN_NULL_TO_NOTNULL, ER.WARN_DATA_OUT_OF_RANGE, ER.NO_DEFAULT, ER.PRIMARY_CANT_HAVE_NULL, ER.DATA_TOO_LONG, ER.DATETIME_FUNCTION_OVERFLOW) @@ -89,7 +91,7 @@ _map_error(NotSupportedError, ER.WARNING_NOT_COMPLETE_ROLLBACK, ER.NOT_SUPPORTED_YET, ER.FEATURE_DISABLED, ER.UNKNOWN_STORAGE_ENGINE) _map_error(OperationalError, ER.DBACCESS_DENIED_ERROR, ER.ACCESS_DENIED_ERROR, ER.CON_COUNT_ERROR, ER.TABLEACCESS_DENIED_ERROR, - ER.COLUMNACCESS_DENIED_ERROR) + ER.COLUMNACCESS_DENIED_ERROR, ER.CONSTRAINT_FAILED, ER.LOCK_DEADLOCK) del _map_error, ER diff --git a/server/www/packages/packages-common/pymysql/optionfile.py b/server/www/packages/packages-common/pymysql/optionfile.py index 23cce8a..91e2dfe 100644 --- a/server/www/packages/packages-common/pymysql/optionfile.py +++ b/server/www/packages/packages-common/pymysql/optionfile.py @@ -7,6 +7,9 @@ else: class Parser(configparser.RawConfigParser): + def __init__(self, **kwargs): + kwargs['allow_no_value'] = True + configparser.RawConfigParser.__init__(self, **kwargs) def __remove_quotes(self, value): quotes = ["'", "\""] diff --git a/server/www/packages/packages-common/pymysql/protocol.py b/server/www/packages/packages-common/pymysql/protocol.py new file mode 100644 index 0000000..8ccf7c4 --- /dev/null +++ b/server/www/packages/packages-common/pymysql/protocol.py @@ -0,0 +1,341 @@ +# Python implementation of low level MySQL client-server protocol +# http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +from __future__ import print_function +from .charset import MBLENGTH +from ._compat import PY2, range_type +from .constants import FIELD_TYPE, SERVER_STATUS +from . 
import err +from .util import byte2int + +import struct +import sys + + +DEBUG = False + +NULL_COLUMN = 251 +UNSIGNED_CHAR_COLUMN = 251 +UNSIGNED_SHORT_COLUMN = 252 +UNSIGNED_INT24_COLUMN = 253 +UNSIGNED_INT64_COLUMN = 254 + + +def dump_packet(data): # pragma: no cover + def printable(data): + if 32 <= byte2int(data) < 127: + if isinstance(data, int): + return chr(data) + return data + return '.' + + try: + print("packet length:", len(data)) + for i in range(1, 7): + f = sys._getframe(i) + print("call[%d]: %s (line %d)" % (i, f.f_code.co_name, f.f_lineno)) + print("-" * 66) + except ValueError: + pass + dump_data = [data[i:i+16] for i in range_type(0, min(len(data), 256), 16)] + for d in dump_data: + print(' '.join("{:02X}".format(byte2int(x)) for x in d) + + ' ' * (16 - len(d)) + ' ' * 2 + + ''.join(printable(x) for x in d)) + print("-" * 66) + print() + + +class MysqlPacket(object): + """Representation of a MySQL response packet. + + Provides an interface for reading/parsing the packet results. + """ + __slots__ = ('_position', '_data') + + def __init__(self, data, encoding): + self._position = 0 + self._data = data + + def get_all_data(self): + return self._data + + def read(self, size): + """Read the first 'size' bytes in packet and advance cursor past them.""" + result = self._data[self._position:(self._position+size)] + if len(result) != size: + error = ('Result length not requested length:\n' + 'Expected=%s. Actual=%s. Position: %s. Data Length: %s' + % (size, len(result), self._position, len(self._data))) + if DEBUG: + print(error) + self.dump() + raise AssertionError(error) + self._position += size + return result + + def read_all(self): + """Read all remaining data in the packet. + + (Subsequent read() will return errors.) + """ + result = self._data[self._position:] + self._position = None # ensure no subsequent read() + return result + + def advance(self, length): + """Advance the cursor in data buffer 'length' bytes.""" + new_position = self._position + length + if new_position < 0 or new_position > len(self._data): + raise Exception('Invalid advance amount (%s) for cursor. ' + 'Position=%s' % (length, new_position)) + self._position = new_position + + def rewind(self, position=0): + """Set the position of the data buffer cursor to 'position'.""" + if position < 0 or position > len(self._data): + raise Exception("Invalid position to rewind cursor to: %s." % position) + self._position = position + + def get_bytes(self, position, length=1): + """Get 'length' bytes starting at 'position'. + + Position is start of payload (first four packet header bytes are not + included) starting at index '0'. + + No error checking is done. If requesting outside end of buffer + an empty string (or string shorter than 'length') may be returned! + """ + return self._data[position:(position+length)] + + if PY2: + def read_uint8(self): + result = ord(self._data[self._position]) + self._position += 1 + return result + else: + def read_uint8(self): + result = self._data[self._position] + self._position += 1 + return result + + def read_uint16(self): + result = struct.unpack_from('<H', self._data, self._position)[0] + self._position += 2 + return result [...] + def is_ok_packet(self): + # https://dev.mysql.com/doc/internals/en/packet-OK_Packet.html + return self._data[0:1] == b'\x00' and len(self._data) >= 7
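A worked example of the little-endian reads above, using the same 4-byte packet-header layout that connections.py unpacks with '<HBB' (the byte values are made up):

import struct

header = b'\x2d\x00\x00\x01'                 # 45-byte payload, sequence id 1
btrl, btrh, packet_number = struct.unpack('<HBB', header)
bytes_to_read = btrl + (btrh << 16)          # 16 low bits + 8 high bits = 24-bit length
assert (bytes_to_read, packet_number) == (45, 1)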
+ + def is_eof_packet(self): + # http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-EOF_Packet + # Caution: \xFE may be LengthEncodedInteger. + # If \xFE is a LengthEncodedInteger header, 8 bytes follow. + return self._data[0:1] == b'\xfe' and len(self._data) < 9 + + def is_auth_switch_request(self): + # http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest + return self._data[0:1] == b'\xfe' + + def is_extra_auth_data(self): + # https://dev.mysql.com/doc/internals/en/successful-authentication.html + return self._data[0:1] == b'\x01' + + def is_resultset_packet(self): + field_count = ord(self._data[0:1]) + return 1 <= field_count <= 250 + + def is_load_local_packet(self): + return self._data[0:1] == b'\xfb' + + def is_error_packet(self): + return self._data[0:1] == b'\xff' + + def check_error(self): + if self.is_error_packet(): + self.rewind() + self.advance(1) # field_count == error (we already know that) + errno = self.read_uint16() + if DEBUG: print("errno =", errno) + err.raise_mysql_exception(self._data) + + def dump(self): + dump_packet(self._data) + + +class FieldDescriptorPacket(MysqlPacket): + """A MysqlPacket that represents a specific column's metadata in the result. + + Parsing is automatically done and the results are exported via public + attributes on the class such as: db, table_name, name, length, type_code. + """ + + def __init__(self, data, encoding): + MysqlPacket.__init__(self, data, encoding) + self._parse_field_descriptor(encoding) + + def _parse_field_descriptor(self, encoding): + """Parse the 'Field Descriptor' (Metadata) packet. + + This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0). + """ + self.catalog = self.read_length_coded_string() + self.db = self.read_length_coded_string() + self.table_name = self.read_length_coded_string().decode(encoding) + self.org_table = self.read_length_coded_string().decode(encoding) + self.name = self.read_length_coded_string().decode(encoding) + self.org_name = self.read_length_coded_string().decode(encoding) + self.charsetnr, self.length, self.type_code, self.flags, self.scale = ( + self.read_struct('<xHIBHBxx')) [...] + if mask_pattern < 0 or mask_pattern > 7: + raise ValueError( + "Mask pattern should be in range(8) (got %s)" % mask_pattern) + class QRCode: def __init__(self, version=None, error_correction=constants.ERROR_CORRECT_M, box_size=10, border=4, - image_factory=None): + image_factory=None, + mask_pattern=None): _check_box_size(box_size) self.version = version and int(version) self.error_correction = int(error_correction)
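Usage sketch for the mask_pattern argument wired in above (mask 3 is an arbitrary choice; None keeps the automatic best_mask_pattern() search):

import qrcode

qr = qrcode.QRCode(mask_pattern=3)   # must be None or an int in range(8)
qr.add_data('hello world')
qr.make(fit=True)                    # applies mask 3 instead of scoring all eight
img = qr.make_image()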
@@ -36,6 +47,8 @@ class QRCode: # Spec says border should be at least four boxes wide, but allow for # any (e.g. for producing printable QR codes). self.border = int(border) + _check_mask_pattern(mask_pattern) + self.mask_pattern = mask_pattern self.image_factory = image_factory if image_factory is not None: assert issubclass(image_factory, BaseImage) @@ -62,7 +75,8 @@ class QRCode: self.data_list.append(data) else: if optimize: - self.data_list.extend(util.optimal_data_chunks(data)) + self.data_list.extend( + util.optimal_data_chunks(data, minimum=optimize)) else: self.data_list.append(util.QRData(data)) self.data_cache = None @@ -76,7 +90,10 @@ class QRCode: """ if fit or (self.version is None): self.best_fit(start=self.version) - self.makeImpl(False, self.best_mask_pattern()) + if self.mask_pattern is None: + self.makeImpl(False, self.best_mask_pattern()) + else: + self.makeImpl(False, self.mask_pattern) def makeImpl(self, test, mask_pattern): _check_version(self.version) diff --git a/server/www/packages/packages-common/qrcode/mecard.py b/server/www/packages/packages-common/qrcode/mecard.py deleted file mode 100644 index ce05582..0000000 --- a/server/www/packages/packages-common/qrcode/mecard.py +++ /dev/null @@ -1,33 +0,0 @@ -import six - -# {'code': 'N', 'label': 'Name', 'required': True, 'multipart': [ -# 'Last Name', 'First Name']}, -PROPERTIES = { - 'NICKNAME': {'label': 'Nickname'}, - 'BDAY': {'label': 'Birthday', 'date': True}, - 'TEL': {'label': 'Phone'}, - 'EMAIL': {'label': 'E-mail'}, - 'ADR': {'label': 'Address', 'multipart': [ - 'PO Box', 'Room Number', 'House Number', 'City', 'Prefecture', - 'Zip Code', 'Country']}, - 'URL': {'label': 'URL'}, - 'MEMO': {'label': 'Note'}, -} - - -def build_code(data): - notation = [] - - name = data['N'] - if not isinstance(name, six.text_type): - name = ','.join(name) - notation.append('N', name) - - for prop in PROPERTIES: - value = data.get(prop['code']) - if not value: - continue - if prop['date']: - value = value.strftime('%Y%m%d') - elif prop['multipart']: - value = ','.join(value) diff --git a/server/www/packages/packages-common/qrcode/release.py b/server/www/packages/packages-common/qrcode/release.py new file mode 100644 index 0000000..abbabb4 --- /dev/null +++ b/server/www/packages/packages-common/qrcode/release.py @@ -0,0 +1,42 @@ +""" +This file provides zest.releaser entry points used when releasing new +qrcode versions. +""" +import os +import re +import datetime + + +def update_manpage(data): + """ + Update the version in the manpage document.
+ """ + if data['name'] != 'qrcode': + print('no qrcode') + return + + base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + filename = os.path.join(base_dir, 'doc', 'qr.1') + with open(filename, 'r') as f: + lines = f.readlines() + + changed = False + for i, line in enumerate(lines): + if not line.startswith('.TH '): + continue + parts = re.split(r'"([^"]*)"', line) + if len(parts) < 5: + continue + changed = parts[3] != data['new_version'] + if changed: + # Update version + parts[3] = data['new_version'] + # Update date + parts[1] = datetime.datetime.now().strftime('%-d %b %Y') + lines[i] = '"'.join(parts) + break + + if changed: + with open(filename, 'w') as f: + for line in lines: + f.write(line) diff --git a/server/www/packages/packages-common/qrcode/speedy.py b/server/www/packages/packages-common/qrcode/speedy.py deleted file mode 100644 index 11ec3aa..0000000 --- a/server/www/packages/packages-common/qrcode/speedy.py +++ /dev/null @@ -1,8 +0,0 @@ -import string -import qrcode - -qr = qrcode.QRCode() - -qr.add_data(string.letters*13) -qr.make() -print(qr.version) diff --git a/server/www/packages/packages-common/qrcode/util.py b/server/www/packages/packages-common/qrcode/util.py index 89dcf09..a9652f7 100644 --- a/server/www/packages/packages-common/qrcode/util.py +++ b/server/www/packages/packages-common/qrcode/util.py @@ -4,7 +4,7 @@ import math import six from six.moves import xrange -from qrcode import base, exceptions +from qrcode import base, exceptions, LUT # QR encoding modes. MODE_NUMBER = 1 << 0 @@ -189,51 +189,39 @@ def _lost_point_level1(modules, modules_count): lost_point = 0 modules_range = xrange(modules_count) - row_range_first = (0, 1) - row_range_last = (-1, 0) - row_range_standard = (-1, 0, 1) - - col_range_first = ((0, 1), (1,)) - col_range_last = ((-1, 0), (-1,)) - col_range_standard = ((-1, 0, 1), (-1, 1)) + container = [0] * (modules_count + 1) for row in modules_range: - - if row == 0: - row_range = row_range_first - elif row == modules_count-1: - row_range = row_range_last - else: - row_range = row_range_standard - + this_row = modules[row] + previous_color = this_row[0] + length = 0 for col in modules_range: - - sameCount = 0 - dark = modules[row][col] - - if col == 0: - col_range = col_range_first - elif col == modules_count-1: - col_range = col_range_last + if this_row[col] == previous_color: + length += 1 else: - col_range = col_range_standard + if length >= 5: + container[length] += 1 + length = 1 + previous_color = this_row[col] + if length >= 5: + container[length] += 1 - for r in row_range: + for col in modules_range: + previous_color = modules[0][col] + length = 0 + for row in modules_range: + if modules[row][col] == previous_color: + length += 1 + else: + if length >= 5: + container[length] += 1 + length = 1 + previous_color = modules[row][col] + if length >= 5: + container[length] += 1 - row_offset = row + r - - if r != 0: - col_idx = 0 - else: - col_idx = 1 - - for c in col_range[col_idx]: - - if dark == modules[row_offset][col + c]: - sameCount += 1 - - if sameCount > 5: - lost_point += (3 + sameCount - 5) + lost_point += sum(container[each_length] * (each_length - 2) + for each_length in xrange(5, modules_count + 1)) return lost_point @@ -242,68 +230,111 @@ def _lost_point_level2(modules, modules_count): lost_point = 0 modules_range = xrange(modules_count - 1) - for row in modules_range: this_row = modules[row] - next_row = modules[row+1] - for col in modules_range: - count = 0 - if this_row[col]: - count += 1 - if 
next_row[col]: - count += 1 - if this_row[col + 1]: - count += 1 - if next_row[col + 1]: - count += 1 - if count == 0 or count == 4: + next_row = modules[row + 1] + # use iter() and next() to skip the next four-block. e.g. + # d a f if top-right a != bottom-right b, + # c b e then neither abcd nor abef loses any points. + modules_range_iter = iter(modules_range) + for col in modules_range_iter: + top_right = this_row[col + 1] + if top_right != next_row[col + 1]: + # reduces runtime by ~33.3% via next(). + # None: nothing is raised if there is no next item. + next(modules_range_iter, None) + elif top_right != this_row[col]: + continue + elif top_right != next_row[col]: + continue + else: lost_point += 3 return lost_point def _lost_point_level3(modules, modules_count): - modules_range_short = xrange(modules_count-6) - + # 1 : 1 : 3 : 1 : 1 ratio (dark:light:dark:light:dark) pattern in + # row/column, preceded or followed by a light area 4 modules wide. From ISO/IEC 18004. + # pattern1: 10111010000 + # pattern2: 00001011101 + modules_range = xrange(modules_count) + modules_range_short = xrange(modules_count-10) lost_point = 0 - for row in xrange(modules_count): + + for row in modules_range: this_row = modules[row] - for col in modules_range_short: - if (this_row[col] - and not this_row[col + 1] - and this_row[col + 2] - and this_row[col + 3] + modules_range_short_iter = iter(modules_range_short) + col = 0 + for col in modules_range_short_iter: + if ( + not this_row[col + 1] and this_row[col + 4] and not this_row[col + 5] - and this_row[col + 6]): + and this_row[col + 6] + and not this_row[col + 9] + and ( + this_row[col + 0] + and this_row[col + 2] + and this_row[col + 3] + and not this_row[col + 7] + and not this_row[col + 8] + and not this_row[col + 10] + or + not this_row[col + 0] + and not this_row[col + 2] + and not this_row[col + 3] + and this_row[col + 7] + and this_row[col + 8] + and this_row[col + 10] + ) + ): lost_point += 40 +# Horspool algorithm. +# if this_row[col + 10] == True, pattern1 shifts 4, pattern2 shifts 2. So min=2. +# if this_row[col + 10] == False, pattern1 shifts 1, pattern2 shifts 1. So min=1.
+ if this_row[col + 10]: + next(modules_range_short_iter, None) - for col in xrange(modules_count): - for row in modules_range_short: - if (modules[row][col] - and not modules[row + 1][col] - and modules[row + 2][col] - and modules[row + 3][col] + for col in modules_range: + modules_range_short_iter = iter(modules_range_short) + row = 0 + for row in modules_range_short_iter: + if ( + not modules[row + 1][col] and modules[row + 4][col] and not modules[row + 5][col] - and modules[row + 6][col]): + and modules[row + 6][col] + and not modules[row + 9][col] + and ( + modules[row + 0][col] + and modules[row + 2][col] + and modules[row + 3][col] + and not modules[row + 7][col] + and not modules[row + 8][col] + and not modules[row + 10][col] + or + not modules[row + 0][col] + and not modules[row + 2][col] + and not modules[row + 3][col] + and modules[row + 7][col] + and modules[row + 8][col] + and modules[row + 10][col] + ) + ): lost_point += 40 + if modules[row + 10][col]: + next(modules_range_short_iter, None) return lost_point def _lost_point_level4(modules, modules_count): - modules_range = xrange(modules_count) - dark_count = 0 - - for row in modules_range: - this_row = modules[row] - for col in modules_range: - if this_row[col]: - dark_count += 1 - - ratio = abs(100 * dark_count / modules_count / modules_count - 50) / 5 - return ratio * 10 + dark_count = sum(map(sum, modules)) + percent = float(dark_count) / (modules_count**2) + # Every 5% departure from 50%, rating++ + rating = int(abs(percent * 100 - 50) / 5) + return rating * 10 def optimal_data_chunks(data, minimum=4): @@ -479,9 +510,12 @@ def create_bytes(buffer, rs_blocks): offset += dcCount # Get error correction polynomial. - rsPoly = base.Polynomial([1], 0) - for i in range(ecCount): - rsPoly = rsPoly * base.Polynomial([1, base.gexp(i)], 0) + if ecCount in LUT.rsPoly_LUT: + rsPoly = base.Polynomial(LUT.rsPoly_LUT[ecCount], 0) + else: + rsPoly = base.Polynomial([1], 0) + for i in range(ecCount): + rsPoly = rsPoly * base.Polynomial([1, base.gexp(i)], 0) rawPoly = base.Polynomial(dcdata[r], len(rsPoly) - 1) diff --git a/server/www/packages/packages-common/six.py b/server/www/packages/packages-common/six.py index 190c023..6bf4fd3 100644 --- a/server/www/packages/packages-common/six.py +++ b/server/www/packages/packages-common/six.py @@ -1,6 +1,4 @@ -"""Utilities for writing code that runs on Python 2 and 3""" - -# Copyright (c) 2010-2015 Benjamin Peterson +# Copyright (c) 2010-2017 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -20,6 +18,8 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +"""Utilities for writing code that runs on Python 2 and 3""" + from __future__ import absolute_import import functools @@ -29,7 +29,7 @@ import sys import types __author__ = "Benjamin Peterson " -__version__ = "1.10.0" +__version__ = "1.11.0" # Useful for very coarse version differentiation. 
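The six.py hunks below add moved attributes and rewrite reraise()/raise_from() to clear their locals in a finally block. A small sketch of the reraise() API being patched (the ZeroDivisionError is just an example):

import sys
import six

try:
    1 / 0
except ZeroDivisionError:
    tp, value, tb = sys.exc_info()
    # re-raises with the original traceback on both Python 2 and 3; the
    # patched version also drops value/tb afterwards to break the
    # traceback reference cycle
    six.reraise(tp, value, tb)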
@@ -241,6 +241,7 @@ _moved_attributes = [ MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("getoutput", "commands", "subprocess"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), @@ -262,10 +263,11 @@ _moved_attributes = [ MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), @@ -337,10 +339,12 @@ _urllib_parse_moved_attributes = [ MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("splitvalue", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), @@ -416,6 +420,8 @@ _urllib_request_moved_attributes = [ MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), + MovedAttribute("parse_http_list", "urllib2", "urllib.request"), + MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) @@ -679,11 +685,15 @@ if PY3: exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value + try: + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + value = None + tb = None else: def exec_(_code_, _globs_=None, _locs_=None): @@ -699,19 +709,28 @@ else: exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): - raise tp, value, tb + try: + raise tp, value, tb + finally: + tb = None """) if sys.version_info[:2] == (3, 2): exec_("""def raise_from(value, from_value): - if from_value is None: - raise value - raise value from from_value + try: + if from_value is None: + raise value + raise value from from_value + finally: + value = None """) elif sys.version_info[:2] > (3, 2): exec_("""def 
raise_from(value, from_value): - raise value from from_value + try: + raise value from from_value + finally: + value = None """) else: def raise_from(value, from_value): @@ -802,10 +821,14 @@ def with_metaclass(meta, *bases): # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. - class metaclass(meta): + class metaclass(type): def __new__(cls, name, this_bases, d): return meta(name, bases, d) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) return type.__new__(metaclass, 'temporary_class', (), {}) diff --git a/server/www/packages/packages-common/tornado/__init__.py b/server/www/packages/packages-common/tornado/__init__.py index f054e40..6bb5ec2 100644 --- a/server/www/packages/packages-common/tornado/__init__.py +++ b/server/www/packages/packages-common/tornado/__init__.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -25,5 +24,5 @@ from __future__ import absolute_import, division, print_function # is zero for an official release, positive for a development branch, # or negative for a release candidate or beta (after the base version # number has been incremented) -version = "4.5.1" -version_info = (4, 5, 1, 0) +version = "5.1" +version_info = (5, 1, 0, 0) diff --git a/server/www/packages/packages-common/tornado/_locale_data.py b/server/www/packages/packages-common/tornado/_locale_data.py index 6fa2c29..a2c5039 100644 --- a/server/www/packages/packages-common/tornado/_locale_data.py +++ b/server/www/packages/packages-common/tornado/_locale_data.py @@ -1,5 +1,4 @@ -#!/usr/bin/env python -# coding: utf-8 +# -*- coding: utf-8 -*- # # Copyright 2012 Facebook # diff --git a/server/www/packages/packages-common/tornado/auth.py b/server/www/packages/packages-common/tornado/auth.py index f02d289..ab1a850 100644 --- a/server/www/packages/packages-common/tornado/auth.py +++ b/server/www/packages/packages-common/tornado/auth.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -38,15 +37,14 @@ Example usage for Google OAuth: class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin): - @tornado.gen.coroutine - def get(self): + async def get(self): if self.get_argument('code', False): - user = yield self.get_authenticated_user( + user = await self.get_authenticated_user( redirect_uri='http://your.site.com/auth/google', code=self.get_argument('code')) # Save the user with e.g. set_secure_cookie else: - yield self.authorize_redirect( + await self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', client_id=self.settings['google_oauth']['key'], scope=['profile', 'email'], @@ -74,8 +72,11 @@ import hashlib import hmac import time import uuid +import warnings -from tornado.concurrent import TracebackFuture, return_future, chain_future +from tornado.concurrent import (Future, _non_deprecated_return_future, + future_set_exc_info, chain_future, + future_set_result_unless_cancelled) from tornado import gen from tornado import httpclient from tornado import escape @@ -112,14 +113,19 @@ def _auth_return_future(f): Note that when using this decorator the ``callback`` parameter inside the function will actually be a future. + + .. deprecated:: 5.1 + Will be removed in 6.0. 
""" replacer = ArgReplacer(f, 'callback') @functools.wraps(f) def wrapper(*args, **kwargs): - future = TracebackFuture() + future = Future() callback, args, kwargs = replacer.replace(future, args, kwargs) if callback is not None: + warnings.warn("callback arguments are deprecated, use the returned Future instead", + DeprecationWarning) future.add_done_callback( functools.partial(_auth_future_to_callback, callback)) @@ -127,9 +133,9 @@ def _auth_return_future(f): if future.done(): return False else: - future.set_exc_info((typ, value, tb)) + future_set_exc_info(future, (typ, value, tb)) return True - with ExceptionStackContext(handle_exception): + with ExceptionStackContext(handle_exception, delay_warning=True): f(*args, **kwargs) return future return wrapper @@ -142,7 +148,7 @@ class OpenIdMixin(object): * ``_OPENID_ENDPOINT``: the identity provider's URI. """ - @return_future + @_non_deprecated_return_future def authenticate_redirect(self, callback_uri=None, ax_attrs=["name", "email", "language", "username"], callback=None): @@ -161,6 +167,11 @@ class OpenIdMixin(object): not strictly necessary as this method is synchronous, but they are supplied for consistency with `OAuthMixin.authorize_redirect`. + + .. deprecated:: 5.1 + + The ``callback`` argument and returned awaitable will be removed + in Tornado 6.0; this will be an ordinary synchronous function. """ callback_uri = callback_uri or self.request.uri args = self._openid_args(callback_uri, ax_attrs=ax_attrs) @@ -178,6 +189,11 @@ class OpenIdMixin(object): is present and `authenticate_redirect` if it is not). The result of this method will generally be used to set a cookie. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. """ # Verify the OpenID response via direct request to the OP args = dict((k, v[-1]) for k, v in self.request.arguments.items()) @@ -185,9 +201,9 @@ class OpenIdMixin(object): url = self._OPENID_ENDPOINT if http_client is None: http_client = self.get_auth_http_client() - http_client.fetch(url, functools.partial( - self._on_authentication_verified, callback), - method="POST", body=urllib_parse.urlencode(args)) + fut = http_client.fetch(url, method="POST", body=urllib_parse.urlencode(args)) + fut.add_done_callback(functools.partial( + self._on_authentication_verified, callback)) def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None): url = urlparse.urljoin(self.request.full_url(), callback_uri) @@ -237,11 +253,16 @@ class OpenIdMixin(object): }) return args - def _on_authentication_verified(self, future, response): - if response.error or b"is_valid:true" not in response.body: + def _on_authentication_verified(self, future, response_fut): + try: + response = response_fut.result() + except Exception as e: future.set_exception(AuthError( - "Invalid OpenID response: %s" % (response.error or - response.body))) + "Error response %s" % e)) + return + if b"is_valid:true" not in response.body: + future.set_exception(AuthError( + "Invalid OpenID response: %s" % response.body)) return # Make sure we got back at least an email from attribute exchange @@ -295,7 +316,7 @@ class OpenIdMixin(object): claimed_id = self.get_argument("openid.claimed_id", None) if claimed_id: user["claimed_id"] = claimed_id - future.set_result(user) + future_set_result_unless_cancelled(future, user) def get_auth_http_client(self): """Returns the `.AsyncHTTPClient` instance to be used for auth requests. 
@@ -322,48 +343,52 @@ class OAuthMixin(object): Subclasses must also override the `_oauth_get_user_future` and `_oauth_consumer_token` methods. """ - @return_future + @_non_deprecated_return_future def authorize_redirect(self, callback_uri=None, extra_params=None, http_client=None, callback=None): """Redirects the user to obtain OAuth authorization for this service. The ``callback_uri`` may be omitted if you have previously - registered a callback URI with the third-party service. For - some services (including Friendfeed), you must use a - previously-registered callback URI and cannot specify a - callback via this method. + registered a callback URI with the third-party service. For + some services, you must use a previously-registered callback + URI and cannot specify a callback via this method. This method sets a cookie called ``_oauth_request_token`` which is subsequently used (and cleared) in `get_authenticated_user` for security purposes. - Note that this method is asynchronous, although it calls - `.RequestHandler.finish` for you so it may not be necessary - to pass a callback or use the `.Future` it returns. However, - if this method is called from a function decorated with - `.gen.coroutine`, you must call it with ``yield`` to keep the - response from being closed prematurely. + This method is asynchronous and must be called with ``await`` + or ``yield`` (This is different from other ``auth*_redirect`` + methods defined in this module). It calls + `.RequestHandler.finish` for you so you should not write any + other response after it returns. .. versionchanged:: 3.1 Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. + """ if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False): raise Exception("This service does not support oauth_callback") if http_client is None: http_client = self.get_auth_http_client() if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": - http_client.fetch( + fut = http_client.fetch( self._oauth_request_token_url(callback_uri=callback_uri, - extra_params=extra_params), - functools.partial( - self._on_request_token, - self._OAUTH_AUTHORIZE_URL, - callback_uri, - callback)) + extra_params=extra_params)) + fut.add_done_callback(functools.partial( + self._on_request_token, + self._OAUTH_AUTHORIZE_URL, + callback_uri, + callback)) else: - http_client.fetch( - self._oauth_request_token_url(), + fut = http_client.fetch(self._oauth_request_token_url()) + fut.add_done_callback( functools.partial( self._on_request_token, self._OAUTH_AUTHORIZE_URL, callback_uri, @@ -380,6 +405,11 @@ class OAuthMixin(object): requests to this service on behalf of the user. The dictionary will also contain other fields such as ``name``, depending on the service used. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. 
""" future = callback request_key = escape.utf8(self.get_argument("oauth_token")) @@ -390,7 +420,8 @@ class OAuthMixin(object): "Missing OAuth request token cookie")) return self.clear_cookie("_oauth_request_token") - cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")] + cookie_key, cookie_secret = [ + base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")] if cookie_key != request_key: future.set_exception(AuthError( "Request token does not match cookie")) @@ -400,8 +431,8 @@ class OAuthMixin(object): token["verifier"] = oauth_verifier if http_client is None: http_client = self.get_auth_http_client() - http_client.fetch(self._oauth_access_token_url(token), - functools.partial(self._on_access_token, callback)) + fut = http_client.fetch(self._oauth_access_token_url(token)) + fut.add_done_callback(functools.partial(self._on_access_token, callback)) def _oauth_request_token_url(self, callback_uri=None, extra_params=None): consumer_token = self._oauth_consumer_token() @@ -429,9 +460,11 @@ class OAuthMixin(object): return url + "?" + urllib_parse.urlencode(args) def _on_request_token(self, authorize_url, callback_uri, callback, - response): - if response.error: - raise Exception("Could not get request token: %s" % response.error) + response_fut): + try: + response = response_fut.result() + except Exception as e: + raise Exception("Could not get request token: %s" % e) request_token = _oauth_parse_response(response.body) data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" + base64.b64encode(escape.utf8(request_token["secret"]))) @@ -471,13 +504,17 @@ class OAuthMixin(object): args["oauth_signature"] = signature return url + "?" + urllib_parse.urlencode(args) - def _on_access_token(self, future, response): - if response.error: + def _on_access_token(self, future, response_fut): + try: + response = response_fut.result() + except Exception: future.set_exception(AuthError("Could not fetch access token")) return access_token = _oauth_parse_response(response.body) - self._oauth_get_user_future(access_token).add_done_callback( + fut = self._oauth_get_user_future(access_token) + fut = gen.convert_yielded(fut) + fut.add_done_callback( functools.partial(self._on_oauth_get_user, access_token, future)) def _oauth_consumer_token(self): @@ -487,7 +524,7 @@ class OAuthMixin(object): """ raise NotImplementedError() - @return_future + @_non_deprecated_return_future def _oauth_get_user_future(self, access_token, callback): """Subclasses must override this to get basic information about the user. @@ -502,7 +539,18 @@ class OAuthMixin(object): For backwards compatibility, the callback-based ``_oauth_get_user`` method is also supported. + + .. versionchanged:: 5.1 + + Subclasses may also define this method with ``async def``. + + .. deprecated:: 5.1 + + The ``_oauth_get_user`` fallback is deprecated and support for it + will be removed in 6.0. """ + warnings.warn("_oauth_get_user is deprecated, override _oauth_get_user_future instead", + DeprecationWarning) # By default, call the old-style _oauth_get_user, but new code # should override this method instead. 
self._oauth_get_user(access_token, callback) @@ -519,7 +567,7 @@ class OAuthMixin(object): future.set_exception(AuthError("Error getting user")) return user["access_token"] = access_token - future.set_result(user) + future_set_result_unless_cancelled(future, user) def _oauth_request_parameters(self, url, access_token, parameters={}, method="GET"): @@ -569,7 +617,7 @@ class OAuth2Mixin(object): * ``_OAUTH_AUTHORIZE_URL``: The service's authorization url. * ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url. """ - @return_future + @_non_deprecated_return_future def authorize_redirect(self, redirect_uri=None, client_id=None, client_secret=None, extra_params=None, callback=None, scope=None, response_type="code"): @@ -586,6 +634,11 @@ class OAuth2Mixin(object): not strictly necessary as this method is synchronous, but they are supplied for consistency with `OAuthMixin.authorize_redirect`. + + .. deprecated:: 5.1 + + The ``callback`` argument and returned awaitable will be removed + in Tornado 6.0; this will be an ordinary synchronous function. """ args = { "redirect_uri": redirect_uri, @@ -629,16 +682,15 @@ class OAuth2Mixin(object): class MainHandler(tornado.web.RequestHandler, tornado.auth.FacebookGraphMixin): @tornado.web.authenticated - @tornado.gen.coroutine - def get(self): - new_entry = yield self.oauth2_request( + async def get(self): + new_entry = await self.oauth2_request( "https://graph.facebook.com/me/feed", post_args={"message": "I am posting from my Tornado application!"}, access_token=self.current_user["access_token"]) if not new_entry: # Call failed; perhaps missing permission? - yield self.authorize_redirect() + await self.authorize_redirect() return self.finish("Posted a message!") @@ -646,6 +698,11 @@ class OAuth2Mixin(object): :hide: .. versionadded:: 4.3 + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. """ all_args = {} if access_token: @@ -657,18 +714,19 @@ class OAuth2Mixin(object): callback = functools.partial(self._on_oauth2_request, callback) http = self.get_auth_http_client() if post_args is not None: - http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args), - callback=callback) + fut = http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args)) else: - http.fetch(url, callback=callback) + fut = http.fetch(url) + fut.add_done_callback(callback) - def _on_oauth2_request(self, future, response): - if response.error: - future.set_exception(AuthError("Error response %s fetching %s" % - (response.error, response.request.url))) + def _on_oauth2_request(self, future, response_fut): + try: + response = response_fut.result() + except Exception as e: + future.set_exception(AuthError("Error response %s" % e)) return - future.set_result(escape.json_decode(response.body)) + future_set_result_unless_cancelled(future, escape.json_decode(response.body)) def get_auth_http_client(self): """Returns the `.AsyncHTTPClient` instance to be used for auth requests. @@ -698,13 +756,12 @@ class TwitterMixin(OAuthMixin): class TwitterLoginHandler(tornado.web.RequestHandler, tornado.auth.TwitterMixin): - @tornado.gen.coroutine - def get(self): + async def get(self): if self.get_argument("oauth_token", None): - user = yield self.get_authenticated_user() + user = await self.get_authenticated_user() # Save the user using e.g. set_secure_cookie() else: - yield self.authorize_redirect() + await self.authorize_redirect() .. 
testoutput:: :hide: @@ -721,7 +778,7 @@ class TwitterMixin(OAuthMixin): _OAUTH_NO_CALLBACKS = False _TWITTER_BASE_URL = "https://api.twitter.com/1.1" - @return_future + @_non_deprecated_return_future def authenticate_redirect(self, callback_uri=None, callback=None): """Just like `~OAuthMixin.authorize_redirect`, but auto-redirects if authorized. @@ -732,6 +789,11 @@ class TwitterMixin(OAuthMixin): .. versionchanged:: 3.1 Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. """ http = self.get_auth_http_client() http.fetch(self._oauth_request_token_url(callback_uri=callback_uri), @@ -764,9 +826,8 @@ class TwitterMixin(OAuthMixin): class MainHandler(tornado.web.RequestHandler, tornado.auth.TwitterMixin): @tornado.web.authenticated - @tornado.gen.coroutine - def get(self): - new_entry = yield self.twitter_request( + async def get(self): + new_entry = await self.twitter_request( "/statuses/update", post_args={"status": "Testing Tornado Web Server"}, access_token=self.current_user["access_token"]) @@ -779,6 +840,10 @@ class TwitterMixin(OAuthMixin): .. testoutput:: :hide: + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. """ if path.startswith('http:') or path.startswith('https:'): # Raw urls are useful for e.g. search which doesn't follow the @@ -798,20 +863,21 @@ class TwitterMixin(OAuthMixin): if args: url += "?" + urllib_parse.urlencode(args) http = self.get_auth_http_client() - http_callback = functools.partial(self._on_twitter_request, callback) + http_callback = functools.partial(self._on_twitter_request, callback, url) if post_args is not None: - http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args), - callback=http_callback) + fut = http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args)) else: - http.fetch(url, callback=http_callback) + fut = http.fetch(url) + fut.add_done_callback(http_callback) - def _on_twitter_request(self, future, response): - if response.error: + def _on_twitter_request(self, future, url, response_fut): + try: + response = response_fut.result() + except Exception as e: future.set_exception(AuthError( - "Error response %s fetching %s" % (response.error, - response.request.url))) + "Error response %s fetching %s" % (e, url))) return - future.set_result(escape.json_decode(response.body)) + future_set_result_unless_cancelled(future, escape.json_decode(response.body)) def _oauth_consumer_token(self): self.require_setting("twitter_consumer_key", "Twitter OAuth") @@ -848,8 +914,8 @@ class GoogleOAuth2Mixin(OAuth2Mixin): .. 
versionadded:: 3.2 """ - _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth" - _OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token" + _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/v2/auth" + _OAUTH_ACCESS_TOKEN_URL = "https://www.googleapis.com/oauth2/v4/token" _OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo" _OAUTH_NO_CALLBACKS = False _OAUTH_SETTINGS_KEY = 'google_oauth' @@ -872,19 +938,18 @@ class GoogleOAuth2Mixin(OAuth2Mixin): class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin): - @tornado.gen.coroutine - def get(self): + async def get(self): if self.get_argument('code', False): - access = yield self.get_authenticated_user( + access = await self.get_authenticated_user( redirect_uri='http://your.site.com/auth/google', code=self.get_argument('code')) - user = yield self.oauth2_request( + user = await self.oauth2_request( "https://www.googleapis.com/oauth2/v1/userinfo", access_token=access["access_token"]) # Save the user and access token with # e.g. set_secure_cookie. else: - yield self.authorize_redirect( + await self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', client_id=self.settings['google_oauth']['key'], scope=['profile', 'email'], @@ -894,7 +959,11 @@ class GoogleOAuth2Mixin(OAuth2Mixin): .. testoutput:: :hide: - """ + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. + """ # noqa: E501 http = self.get_auth_http_client() body = urllib_parse.urlencode({ "redirect_uri": redirect_uri, @@ -904,18 +973,22 @@ class GoogleOAuth2Mixin(OAuth2Mixin): "grant_type": "authorization_code", }) - http.fetch(self._OAUTH_ACCESS_TOKEN_URL, - functools.partial(self._on_access_token, callback), - method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body) + fut = http.fetch(self._OAUTH_ACCESS_TOKEN_URL, + method="POST", + headers={'Content-Type': 'application/x-www-form-urlencoded'}, + body=body) + fut.add_done_callback(functools.partial(self._on_access_token, callback)) - def _on_access_token(self, future, response): + def _on_access_token(self, future, response_fut): """Callback function for the exchange to the access token.""" - if response.error: - future.set_exception(AuthError('Google auth error: %s' % str(response))) + try: + response = response_fut.result() + except Exception as e: + future.set_exception(AuthError('Google auth error: %s' % str(e))) return args = escape.json_decode(response.body) - future.set_result(args) + future_set_result_unless_cancelled(future, args) class FacebookGraphMixin(OAuth2Mixin): @@ -936,17 +1009,16 @@ class FacebookGraphMixin(OAuth2Mixin): class FacebookGraphLoginHandler(tornado.web.RequestHandler, tornado.auth.FacebookGraphMixin): - @tornado.gen.coroutine - def get(self): + async def get(self): if self.get_argument("code", False): - user = yield self.get_authenticated_user( + user = await self.get_authenticated_user( redirect_uri='/auth/facebookgraph/', client_id=self.settings["facebook_api_key"], client_secret=self.settings["facebook_secret"], code=self.get_argument("code")) # Save the user with e.g. 
set_secure_cookie else: - yield self.authorize_redirect( + await self.authorize_redirect( redirect_uri='/auth/facebookgraph/', client_id=self.settings["facebook_api_key"], extra_params={"scope": "read_stream,offline_access"}) @@ -963,11 +1035,17 @@ class FacebookGraphMixin(OAuth2Mixin): Tornado it will change from a string to an integer. * ``id``, ``name``, ``first_name``, ``last_name``, ``locale``, ``picture``, ``link``, plus any fields named in the ``extra_fields`` argument. These - fields are copied from the Facebook graph API `user object `_ + fields are copied from the Facebook graph API + `user object `_ .. versionchanged:: 4.5 The ``session_expires`` field was updated to support changes made to the Facebook API in March 2017. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. """ http = self.get_auth_http_client() args = { @@ -982,14 +1060,17 @@ class FacebookGraphMixin(OAuth2Mixin): if extra_fields: fields.update(extra_fields) - http.fetch(self._oauth_request_token_url(**args), - functools.partial(self._on_access_token, redirect_uri, client_id, - client_secret, callback, fields)) + fut = http.fetch(self._oauth_request_token_url(**args)) + fut.add_done_callback(functools.partial(self._on_access_token, redirect_uri, client_id, + client_secret, callback, fields)) + @gen.coroutine def _on_access_token(self, redirect_uri, client_id, client_secret, - future, fields, response): - if response.error: - future.set_exception(AuthError('Facebook auth error: %s' % str(response))) + future, fields, response_fut): + try: + response = response_fut.result() + except Exception as e: + future.set_exception(AuthError('Facebook auth error: %s' % str(e))) return args = escape.json_decode(response.body) @@ -998,10 +1079,8 @@ class FacebookGraphMixin(OAuth2Mixin): "expires_in": args.get("expires_in") } - self.facebook_request( + user = yield self.facebook_request( path="/me", - callback=functools.partial( - self._on_get_user_info, future, session, fields), access_token=session["access_token"], appsecret_proof=hmac.new(key=client_secret.encode('utf8'), msg=session["access_token"].encode('utf8'), @@ -1009,9 +1088,8 @@ class FacebookGraphMixin(OAuth2Mixin): fields=",".join(fields) ) - def _on_get_user_info(self, future, session, fields, user): if user is None: - future.set_result(None) + future_set_result_unless_cancelled(future, None) return fieldmap = {} @@ -1024,7 +1102,7 @@ class FacebookGraphMixin(OAuth2Mixin): # This should change in Tornado 5.0. fieldmap.update({"access_token": session["access_token"], "session_expires": str(session.get("expires_in"))}) - future.set_result(fieldmap) + future_set_result_unless_cancelled(future, fieldmap) @_auth_return_future def facebook_request(self, path, callback, access_token=None, @@ -1045,14 +1123,13 @@ class FacebookGraphMixin(OAuth2Mixin): Example usage: - ..testcode:: + .. testcode:: class MainHandler(tornado.web.RequestHandler, tornado.auth.FacebookGraphMixin): @tornado.web.authenticated - @tornado.gen.coroutine - def get(self): - new_entry = yield self.facebook_request( + async def get(self): + new_entry = await self.facebook_request( "/me/feed", post_args={"message": "I am posting from my Tornado application!"}, access_token=self.current_user["access_token"]) @@ -1075,6 +1152,11 @@ class FacebookGraphMixin(OAuth2Mixin): .. versionchanged:: 3.1 Added the ability to override ``self._FACEBOOK_BASE_URL``. + + .. 
deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. """ url = self._FACEBOOK_BASE_URL + path # Thanks to the _auth_return_future decorator, our "callback" diff --git a/server/www/packages/packages-common/tornado/autoreload.py b/server/www/packages/packages-common/tornado/autoreload.py index 60571ef..7d69474 100644 --- a/server/www/packages/packages-common/tornado/autoreload.py +++ b/server/www/packages/packages-common/tornado/autoreload.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -63,12 +62,11 @@ import sys # file.py gets added to the path, which can cause confusion as imports # may become relative in spite of the future import. # -# We address the former problem by setting the $PYTHONPATH environment -# variable before re-execution so the new process will see the correct -# path. We attempt to address the latter problem when tornado.autoreload -# is run as __main__, although we can't fix the general case because -# we cannot reliably reconstruct the original command line -# (http://bugs.python.org/issue14208). +# We address the former problem by reconstructing the original command +# line (Python >= 3.4) or by setting the $PYTHONPATH environment +# variable (Python < 3.4) before re-execution so the new process will +# see the correct path. We attempt to address the latter problem when +# tornado.autoreload is run as __main__. if __name__ == "__main__": # This sys.path manipulation must come before our imports (as much @@ -109,15 +107,18 @@ _watched_files = set() _reload_hooks = [] _reload_attempted = False _io_loops = weakref.WeakKeyDictionary() # type: ignore +_autoreload_is_main = False +_original_argv = None +_original_spec = None -def start(io_loop=None, check_time=500): +def start(check_time=500): """Begins watching source files for changes. - .. versionchanged:: 4.1 - The ``io_loop`` argument is deprecated. + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. """ - io_loop = io_loop or ioloop.IOLoop.current() + io_loop = ioloop.IOLoop.current() if io_loop in _io_loops: return _io_loops[io_loop] = True @@ -125,7 +126,7 @@ def start(io_loop=None, check_time=500): gen_log.warning("tornado.autoreload started more than once in the same process") modify_times = {} callback = functools.partial(_reload_on_update, modify_times) - scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop) + scheduler = ioloop.PeriodicCallback(callback, check_time) scheduler.start() @@ -137,7 +138,7 @@ def wait(): the command-line interface in `main`) """ io_loop = ioloop.IOLoop() - start(io_loop) + io_loop.add_callback(start) io_loop.start() @@ -209,21 +210,33 @@ def _reload(): # ioloop.set_blocking_log_threshold so it doesn't fire # after the exec. signal.setitimer(signal.ITIMER_REAL, 0, 0) - # sys.path fixes: see comments at top of file. If sys.path[0] is an empty - # string, we were (probably) invoked with -m and the effective path - # is about to change on re-exec. Add the current directory to $PYTHONPATH - # to ensure that the new process sees the same path we did. - path_prefix = '.' + os.pathsep - if (sys.path[0] == '' and - not os.environ.get("PYTHONPATH", "").startswith(path_prefix)): - os.environ["PYTHONPATH"] = (path_prefix + - os.environ.get("PYTHONPATH", "")) + # sys.path fixes: see comments at top of file. If __main__.__spec__ + # exists, we were invoked with -m and the effective path is about to + # change on re-exec. 
Reconstruct the original command line to + # ensure that the new process sees the same path we did. If + # __spec__ is not available (Python < 3.4), check instead if + # sys.path[0] is an empty string and add the current directory to + # $PYTHONPATH. + if _autoreload_is_main: + spec = _original_spec + argv = _original_argv + else: + spec = getattr(sys.modules['__main__'], '__spec__', None) + argv = sys.argv + if spec: + argv = ['-m', spec.name] + argv[1:] + else: + path_prefix = '.' + os.pathsep + if (sys.path[0] == '' and + not os.environ.get("PYTHONPATH", "").startswith(path_prefix)): + os.environ["PYTHONPATH"] = (path_prefix + + os.environ.get("PYTHONPATH", "")) if not _has_execv: - subprocess.Popen([sys.executable] + sys.argv) - sys.exit(0) + subprocess.Popen([sys.executable] + argv) + os._exit(0) else: try: - os.execv(sys.executable, [sys.executable] + sys.argv) + os.execv(sys.executable, [sys.executable] + argv) except OSError: # Mac OS X versions prior to 10.6 do not support execv in # a process that contains multiple threads. Instead of @@ -236,8 +249,7 @@ def _reload(): # Unfortunately the errno returned in this case does not # appear to be consistent, so we can't easily check for # this error specifically. - os.spawnv(os.P_NOWAIT, sys.executable, - [sys.executable] + sys.argv) + os.spawnv(os.P_NOWAIT, sys.executable, [sys.executable] + argv) # At this point the IOLoop has been closed and finally # blocks will experience errors if we allow the stack to # unwind, so just exit uncleanly. @@ -264,7 +276,17 @@ def main(): can catch import-time problems like syntax errors that would otherwise prevent the script from reaching its call to `wait`. """ + # Remember that we were launched with autoreload as main. + # The main module can be tricky; set the variables both in our globals + # (which may be __main__) and the real importable version. + import tornado.autoreload + global _autoreload_is_main + global _original_argv, _original_spec + tornado.autoreload._autoreload_is_main = _autoreload_is_main = True original_argv = sys.argv + tornado.autoreload._original_argv = _original_argv = original_argv + original_spec = getattr(sys.modules['__main__'], '__spec__', None) + tornado.autoreload._original_spec = _original_spec = original_spec sys.argv = sys.argv[:] if len(sys.argv) >= 3 and sys.argv[1] == "-m": mode = "module" diff --git a/server/www/packages/packages-common/tornado/concurrent.py b/server/www/packages/packages-common/tornado/concurrent.py index 667e6b1..78b2091 100644 --- a/server/www/packages/packages-common/tornado/concurrent.py +++ b/server/www/packages/packages-common/tornado/concurrent.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2012 Facebook # @@ -13,13 +12,19 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -"""Utilities for working with threads and ``Futures``. +"""Utilities for working with ``Future`` objects. ``Futures`` are a pattern for concurrent programming introduced in -Python 3.2 in the `concurrent.futures` package. This package defines -a mostly-compatible `Future` class designed for use from coroutines, -as well as some utility functions for interacting with the -`concurrent.futures` package. +Python 3.2 in the `concurrent.futures` package, and also adopted (in a +slightly different form) in Python 3.4's `asyncio` package. 
This +package defines a ``Future`` class that is an alias for `asyncio.Future` +when available, and a compatible implementation for older versions of +Python. It also includes some utility functions for interacting with +``Future`` objects. + +While this package is an important part of Tornado's internal +implementation, applications rarely need to interact with it +directly. """ from __future__ import absolute_import, division, print_function @@ -28,6 +33,7 @@ import platform import textwrap import traceback import sys +import warnings from tornado.log import app_log from tornado.stack_context import ExceptionStackContext, wrap @@ -38,6 +44,11 @@ try: except ImportError: futures = None +try: + import asyncio +except ImportError: + asyncio = None + try: import typing except ImportError: @@ -138,16 +149,17 @@ class Future(object): Tornado they are normally used with `.IOLoop.add_future` or by yielding them in a `.gen.coroutine`. - `tornado.concurrent.Future` is similar to - `concurrent.futures.Future`, but not thread-safe (and therefore - faster for use with single-threaded event loops). + `tornado.concurrent.Future` is an alias for `asyncio.Future` when + that package is available (Python 3.4+). Unlike + `concurrent.futures.Future`, the ``Futures`` used by Tornado and + `asyncio` are not thread-safe (and therefore faster for use with + single-threaded event loops). - In addition to ``exception`` and ``set_exception``, methods ``exc_info`` - and ``set_exc_info`` are supported to capture tracebacks in Python 2. - The traceback is automatically available in Python 3, but in the - Python 2 futures backport this information is discarded. - This functionality was previously available in a separate class - ``TracebackFuture``, which is now a deprecated alias for this class. + In addition to ``exception`` and ``set_exception``, Tornado's + ``Future`` implementation supports storing an ``exc_info`` triple + to support better tracebacks on Python 2. To set an ``exc_info`` + triple, use `future_set_exc_info`, and to retrieve one, call + `result()` (which will raise it). .. versionchanged:: 4.0 `tornado.concurrent.Future` is always a thread-unsafe ``Future`` @@ -164,6 +176,17 @@ class Future(object): where it results in undesired logging it may be necessary to suppress the logging by ensuring that the exception is observed: ``f.add_done_callback(lambda f: f.exception())``. + + .. versionchanged:: 5.0 + + This class was previoiusly available under the name + ``TracebackFuture``. This name, which was deprecated since + version 4.0, has been removed. When `asyncio` is available + ``tornado.concurrent.Future`` is now an alias for + `asyncio.Future`. Like `asyncio.Future`, callbacks are now + always scheduled on the `.IOLoop` and are never run + synchronously. + """ def __init__(self): self._done = False @@ -265,7 +288,8 @@ class Future(object): `add_done_callback` directly. 
""" if self._done: - fn(self) + from tornado.ioloop import IOLoop + IOLoop.current().add_callback(fn, self) else: self._callbacks.append(fn) @@ -320,13 +344,12 @@ class Future(object): def _set_done(self): self._done = True - for cb in self._callbacks: - try: - cb(self) - except Exception: - app_log.exception('Exception in callback %r for %r', - cb, self) - self._callbacks = None + if self._callbacks: + from tornado.ioloop import IOLoop + loop = IOLoop.current() + for cb in self._callbacks: + loop.add_callback(cb, self) + self._callbacks = None # On Python 3.3 or older, objects with a destructor part of a reference # cycle are never destroyed. It's no longer the case on Python 3.4 thanks to @@ -344,7 +367,8 @@ class Future(object): self, ''.join(tb).rstrip()) -TracebackFuture = Future +if asyncio is not None: + Future = asyncio.Future # noqa if futures is None: FUTURES = Future # type: typing.Union[type, typing.Tuple[type, ...]] @@ -358,11 +382,11 @@ def is_future(x): class DummyExecutor(object): def submit(self, fn, *args, **kwargs): - future = TracebackFuture() + future = Future() try: - future.set_result(fn(*args, **kwargs)) + future_set_result_unless_cancelled(future, fn(*args, **kwargs)) except Exception: - future.set_exc_info(sys.exc_info()) + future_set_exc_info(future, sys.exc_info()) return future def shutdown(self, wait=True): @@ -378,29 +402,53 @@ def run_on_executor(*args, **kwargs): The decorated method may be called with a ``callback`` keyword argument and returns a future. - The `.IOLoop` and executor to be used are determined by the ``io_loop`` - and ``executor`` attributes of ``self``. To use different attributes, - pass keyword arguments to the decorator:: + The executor to be used is determined by the ``executor`` + attributes of ``self``. To use a different attribute name, pass a + keyword argument to the decorator:: @run_on_executor(executor='_thread_pool') def foo(self): pass + This decorator should not be confused with the similarly-named + `.IOLoop.run_in_executor`. In general, using ``run_in_executor`` + when *calling* a blocking method is recommended instead of using + this decorator when *defining* a method. If compatibility with older + versions of Tornado is required, consider defining an executor + and using ``executor.submit()`` at the call site. + .. versionchanged:: 4.2 Added keyword arguments to use alternative attributes. + + .. versionchanged:: 5.0 + Always uses the current IOLoop instead of ``self.io_loop``. + + .. versionchanged:: 5.1 + Returns a `.Future` compatible with ``await`` instead of a + `concurrent.futures.Future`. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in + 6.0. The decorator itself is discouraged in new code but will + not be removed in 6.0. 
""" def run_on_executor_decorator(fn): executor = kwargs.get("executor", "executor") - io_loop = kwargs.get("io_loop", "io_loop") @functools.wraps(fn) def wrapper(self, *args, **kwargs): callback = kwargs.pop("callback", None) - future = getattr(self, executor).submit(fn, self, *args, **kwargs) + async_future = Future() + conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs) + chain_future(conc_future, async_future) if callback: - getattr(self, io_loop).add_future( - future, lambda future: callback(future.result())) - return future + warnings.warn("callback arguments are deprecated, use the returned Future instead", + DeprecationWarning) + from tornado.ioloop import IOLoop + IOLoop.current().add_future( + async_future, lambda future: callback(future.result())) + return async_future return wrapper if args and kwargs: raise ValueError("cannot combine positional and keyword args") @@ -418,6 +466,10 @@ def return_future(f): """Decorator to make a function that returns via callback return a `Future`. + This decorator was provided to ease the transition from + callback-oriented code to coroutines. It is not recommended for + new code. + The wrapped function should take a ``callback`` keyword argument and invoke it with one argument when it has finished. To signal failure, the function can simply raise an exception (which will be @@ -425,13 +477,13 @@ def return_future(f): From the caller's perspective, the callback argument is optional. If one is given, it will be invoked when the function is complete - with `Future.result()` as an argument. If the function fails, the + with ``Future.result()`` as an argument. If the function fails, the callback will not be run and an exception will be raised into the surrounding `.StackContext`. If no callback is given, the caller should use the ``Future`` to wait for the function to complete (perhaps by yielding it in a - `.gen.engine` function, or passing it to `.IOLoop.add_future`). + coroutine, or passing it to `.IOLoop.add_future`). Usage: @@ -442,31 +494,52 @@ def return_future(f): # Do stuff (possibly asynchronous) callback(result) - @gen.engine - def caller(callback): - yield future_func(arg1, arg2) - callback() + async def caller(): + await future_func(arg1, arg2) .. Note that ``@return_future`` and ``@gen.engine`` can be applied to the same function, provided ``@return_future`` appears first. However, consider using ``@gen.coroutine`` instead of this combination. + + .. versionchanged:: 5.1 + + Now raises a `.DeprecationWarning` if a callback argument is passed to + the decorated function and deprecation warnings are enabled. + + .. deprecated:: 5.1 + + This decorator will be removed in Tornado 6.0. New code should + use coroutines directly instead of wrapping callback-based code + with this decorator. Interactions with non-Tornado + callback-based code should be managed explicitly to avoid + relying on the `.ExceptionStackContext` built into this + decorator. """ + warnings.warn("@return_future is deprecated, use coroutines instead", + DeprecationWarning) + return _non_deprecated_return_future(f) + + +def _non_deprecated_return_future(f): + # Allow auth.py to use this decorator without triggering + # deprecation warnings. This will go away once auth.py has removed + # its legacy interfaces in 6.0. 
replacer = ArgReplacer(f, 'callback') @functools.wraps(f) def wrapper(*args, **kwargs): - future = TracebackFuture() + future = Future() callback, args, kwargs = replacer.replace( - lambda value=_NO_RESULT: future.set_result(value), + lambda value=_NO_RESULT: future_set_result_unless_cancelled(future, value), args, kwargs) def handle_error(typ, value, tb): - future.set_exc_info((typ, value, tb)) + future_set_exc_info(future, (typ, value, tb)) return True exc_info = None - with ExceptionStackContext(handle_error): + with ExceptionStackContext(handle_error, delay_warning=True): try: result = f(*args, **kwargs) if result is not None: @@ -489,13 +562,16 @@ def return_future(f): # immediate exception, and again when the future resolves and # the callback triggers its exception by calling future.result()). if callback is not None: + warnings.warn("callback arguments are deprecated, use the returned Future instead", + DeprecationWarning) + def run_callback(future): result = future.result() if result is _NO_RESULT: callback() else: callback(future.result()) - future.add_done_callback(wrap(run_callback)) + future_add_done_callback(future, wrap(run_callback)) return future return wrapper @@ -505,17 +581,72 @@ def chain_future(a, b): The result (success or failure) of ``a`` will be copied to ``b``, unless ``b`` has already been completed or cancelled by the time ``a`` finishes. + + .. versionchanged:: 5.0 + + Now accepts both Tornado/asyncio `Future` objects and + `concurrent.futures.Future`. + """ def copy(future): assert future is a if b.done(): return - if (isinstance(a, TracebackFuture) and - isinstance(b, TracebackFuture) and + if (hasattr(a, 'exc_info') and a.exc_info() is not None): - b.set_exc_info(a.exc_info()) + future_set_exc_info(b, a.exc_info()) elif a.exception() is not None: b.set_exception(a.exception()) else: b.set_result(a.result()) - a.add_done_callback(copy) + if isinstance(a, Future): + future_add_done_callback(a, copy) + else: + # concurrent.futures.Future + from tornado.ioloop import IOLoop + IOLoop.current().add_future(a, copy) + + +def future_set_result_unless_cancelled(future, value): + """Set the given ``value`` as the `Future`'s result, if not cancelled. + + Avoids asyncio.InvalidStateError when calling set_result() on + a cancelled `asyncio.Future`. + + .. versionadded:: 5.0 + """ + if not future.cancelled(): + future.set_result(value) + + +def future_set_exc_info(future, exc_info): + """Set the given ``exc_info`` as the `Future`'s exception. + + Understands both `asyncio.Future` and Tornado's extensions to + enable better tracebacks on Python 2. + + .. versionadded:: 5.0 + """ + if hasattr(future, 'set_exc_info'): + # Tornado's Future + future.set_exc_info(exc_info) + else: + # asyncio.Future + future.set_exception(exc_info[1]) + + +def future_add_done_callback(future, callback): + """Arrange to call ``callback`` when ``future`` is complete. + + ``callback`` is invoked with one argument, the ``future``. + + If ``future`` is already done, ``callback`` is invoked immediately. + This may differ from the behavior of ``Future.add_done_callback``, + which makes no such guarantee. + + .. 
versionadded:: 5.0 + """ + if future.done(): + callback(future) + else: + future.add_done_callback(callback) diff --git a/server/www/packages/packages-common/tornado/curl_httpclient.py b/server/www/packages/packages-common/tornado/curl_httpclient.py index eef4a17..7f5cb10 100644 --- a/server/www/packages/packages-common/tornado/curl_httpclient.py +++ b/server/www/packages/packages-common/tornado/curl_httpclient.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -37,8 +36,8 @@ curl_log = logging.getLogger('tornado.curl_httpclient') class CurlAsyncHTTPClient(AsyncHTTPClient): - def initialize(self, io_loop, max_clients=10, defaults=None): - super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults) + def initialize(self, max_clients=10, defaults=None): + super(CurlAsyncHTTPClient, self).initialize(defaults=defaults) self._multi = pycurl.CurlMulti() self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout) self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket) @@ -53,7 +52,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): # SOCKETFUNCTION. Mitigate the effects of such bugs by # forcing a periodic scan of all active requests. self._force_timeout_callback = ioloop.PeriodicCallback( - self._handle_force_timeout, 1000, io_loop=io_loop) + self._handle_force_timeout, 1000) self._force_timeout_callback.start() # Work around a bug in libcurl 7.29.0: Some fields in the curl @@ -74,8 +73,14 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): self._multi.close() super(CurlAsyncHTTPClient, self).close() + # Set the properties below to None to reduce the reference count of the + # current instance, because they hold some methods of the current + # instance that would otherwise cause a circular reference. + self._force_timeout_callback = None + self._multi = None + def fetch_impl(self, request, callback): - self._requests.append((request, callback)) + self._requests.append((request, callback, self.io_loop.time())) self._process_queue() self._set_timeout(0) @@ -200,13 +205,15 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): while self._free_list and self._requests: started += 1 curl = self._free_list.pop() - (request, callback) = self._requests.popleft() + (request, callback, queue_start_time) = self._requests.popleft() curl.info = { "headers": httputil.HTTPHeaders(), "buffer": BytesIO(), "request": request, "callback": callback, + "queue_start_time": queue_start_time, "curl_start_time": time.time(), + "curl_start_ioloop_time": self.io_loop.current().time(), } try: self._curl_setup_request( @@ -252,9 +259,10 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): # the various curl timings are documented at # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html time_info = dict( - queue=info["curl_start_time"] - info["request"].start_time, + queue=info["curl_start_ioloop_time"] - info["queue_start_time"], namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME), connect=curl.getinfo(pycurl.CONNECT_TIME), + appconnect=curl.getinfo(pycurl.APPCONNECT_TIME), pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME), starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME), total=curl.getinfo(pycurl.TOTAL_TIME), @@ -265,7 +273,8 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): request=info["request"], code=code, headers=info["headers"], buffer=buffer, effective_url=effective_url, error=error, reason=info['headers'].get("X-Http-Reason", None), - request_time=time.time() - info["curl_start_time"], + request_time=self.io_loop.time() - info["curl_start_ioloop_time"], + start_time=info["curl_start_time"],
time_info=time_info)) except Exception: self.handle_callback_exception(info["callback"]) @@ -313,17 +322,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): self.io_loop.add_callback(request.streaming_callback, chunk) else: write_function = buffer.write - if bytes is str: # py2 - curl.setopt(pycurl.WRITEFUNCTION, write_function) - else: # py3 - # Upstream pycurl doesn't support py3, but ubuntu 12.10 includes - # a fork/port. That version has a bug in which it passes unicode - # strings instead of bytes to the WRITEFUNCTION. This means that - # if you use a WRITEFUNCTION (which tornado always does), you cannot - # download arbitrary binary data. This needs to be fixed in the - # ported pycurl package, but in the meantime this lambda will - # make it work for downloading (utf8) text. - curl.setopt(pycurl.WRITEFUNCTION, lambda s: write_function(utf8(s))) + curl.setopt(pycurl.WRITEFUNCTION, write_function) curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects) curl.setopt(pycurl.MAXREDIRS, request.max_redirects) curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout)) @@ -342,8 +341,8 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): curl.setopt(pycurl.PROXY, request.proxy_host) curl.setopt(pycurl.PROXYPORT, request.proxy_port) if request.proxy_username: - credentials = '%s:%s' % (request.proxy_username, - request.proxy_password) + credentials = httputil.encode_username_password(request.proxy_username, + request.proxy_password) curl.setopt(pycurl.PROXYUSERPWD, credentials) if (request.proxy_auth_mode is None or @@ -435,8 +434,6 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): curl.setopt(pycurl.INFILESIZE, len(request.body or '')) if request.auth_username is not None: - userpwd = "%s:%s" % (request.auth_username, request.auth_password or '') - if request.auth_mode is None or request.auth_mode == "basic": curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC) elif request.auth_mode == "digest": @@ -444,7 +441,9 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): else: raise ValueError("Unsupported auth_mode %s" % request.auth_mode) - curl.setopt(pycurl.USERPWD, native_str(userpwd)) + userpwd = httputil.encode_username_password(request.auth_username, + request.auth_password) + curl.setopt(pycurl.USERPWD, userpwd) curl_log.debug("%s %s (username: %r)", request.method, request.url, request.auth_username) else: @@ -494,8 +493,10 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): def _curl_debug(self, debug_type, debug_msg): debug_types = ('I', '<', '>', '<', '>') if debug_type == 0: + debug_msg = native_str(debug_msg) curl_log.debug('%s', debug_msg.strip()) elif debug_type in (1, 2): + debug_msg = native_str(debug_msg) for line in debug_msg.splitlines(): curl_log.debug('%s %s', debug_types[debug_type], line) elif debug_type == 4: diff --git a/server/www/packages/packages-common/tornado/escape.py b/server/www/packages/packages-common/tornado/escape.py index 2ca3fe3..a79ece6 100644 --- a/server/www/packages/packages-common/tornado/escape.py +++ b/server/www/packages/packages-common/tornado/escape.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -274,7 +273,9 @@ def recursive_unicode(obj): # This regex should avoid those problems. # Use to_unicode instead of tornado.util.u - we don't want backslashes getting # processed as escapes. 
-_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&|")*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&|")*\)))+)""")) +_URL_RE = re.compile(to_unicode( + r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&|")*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&|")*\)))+)""" # noqa: E501 +)) def linkify(text, shorten=False, extra_params="", @@ -289,24 +290,24 @@ def linkify(text, shorten=False, extra_params="", * ``shorten``: Long urls will be shortened for display. * ``extra_params``: Extra text to include in the link tag, or a callable - taking the link as an argument and returning the extra text - e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``, - or:: + taking the link as an argument and returning the extra text + e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``, + or:: - def extra_params_cb(url): - if url.startswith("http://example.com"): - return 'class="internal"' - else: - return 'class="external" rel="nofollow"' - linkify(text, extra_params=extra_params_cb) + def extra_params_cb(url): + if url.startswith("http://example.com"): + return 'class="internal"' + else: + return 'class="external" rel="nofollow"' + linkify(text, extra_params=extra_params_cb) * ``require_protocol``: Only linkify urls which include a protocol. If - this is False, urls such as www.facebook.com will also be linkified. + this is False, urls such as www.facebook.com will also be linkified. * ``permitted_protocols``: List (or set) of protocols which should be - linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp", - "mailto"])``. It is very unsafe to include protocols such as - ``javascript``. + linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp", + "mailto"])``. It is very unsafe to include protocols such as + ``javascript``. """ if extra_params and not callable(extra_params): extra_params = " " + extra_params.strip() diff --git a/server/www/packages/packages-common/tornado/gen.py b/server/www/packages/packages-common/tornado/gen.py index 99f9106..3556374 100644 --- a/server/www/packages/packages-common/tornado/gen.py +++ b/server/www/packages/packages-common/tornado/gen.py @@ -1,9 +1,23 @@ -"""``tornado.gen`` is a generator-based interface to make it easier to -work in an asynchronous environment. Code using the ``gen`` module -is technically asynchronous, but it is written as a single generator +"""``tornado.gen`` implements generator-based coroutines. + +.. note:: + + The "decorator and generator" approach in this module is a + precursor to native coroutines (using ``async def`` and ``await``) + which were introduced in Python 3.5. Applications that do not + require compatibility with older versions of Python should use + native coroutines instead. Some parts of this module are still + useful with native coroutines, notably `multi`, `sleep`, + `WaitIterator`, and `with_timeout`. Some of these functions have + counterparts in the `asyncio` module which may be used as well, + although the two may not necessarily be 100% compatible. + +Coroutines provide an easier way to work in an asynchronous +environment than chaining callbacks. Code using coroutines is +technically asynchronous, but it is written as a single generator instead of a collection of separate functions. -For example, the following asynchronous handler: +For example, the following callback-based asynchronous handler: .. 
testcode:: @@ -37,7 +51,7 @@ could be written with ``gen`` as: :hide: Most asynchronous functions in Tornado return a `.Future`; -yielding this object returns its `~.Future.result`. +yielding this object returns its ``Future.result``. You can also yield a list or dict of ``Futures``, which will be started at the same time and run in parallel; a list or dict of results will @@ -81,15 +95,15 @@ import functools import itertools import os import sys -import textwrap import types -import weakref +import warnings -from tornado.concurrent import Future, TracebackFuture, is_future, chain_future +from tornado.concurrent import (Future, is_future, chain_future, future_set_exc_info, + future_add_done_callback, future_set_result_unless_cancelled) from tornado.ioloop import IOLoop from tornado.log import app_log from tornado import stack_context -from tornado.util import PY3, raise_exc_info +from tornado.util import PY3, raise_exc_info, TimeoutError try: try: @@ -154,10 +168,6 @@ class ReturnValueIgnoredError(Exception): pass -class TimeoutError(Exception): - """Exception raised by ``with_timeout``.""" - - def _value_from_stopiteration(e): try: # StopIteration has a value attribute beginning in py33. @@ -173,6 +183,21 @@ def _value_from_stopiteration(e): return None +def _create_future(): + future = Future() + # Fixup asyncio debug info by removing extraneous stack entries + source_traceback = getattr(future, "_source_traceback", ()) + while source_traceback: + # Each traceback entry is equivalent to a + # (filename, self.lineno, self.name, self.line) tuple + filename = source_traceback[-1][0] + if filename == __file__: + del source_traceback[-1] + else: + break + return future + + def engine(func): """Callback-oriented decorator for asynchronous generators. @@ -189,7 +214,14 @@ def engine(func): they are finished. One notable exception is the `~tornado.web.RequestHandler` :ref:`HTTP verb methods `, which use ``self.finish()`` in place of a callback argument. + + .. deprecated:: 5.1 + + This decorator will be removed in 6.0. Use `coroutine` or + ``async def`` instead. """ + warnings.warn("gen.engine is deprecated, use gen.coroutine or async def instead", + DeprecationWarning) func = _make_coroutine_wrapper(func, replace_callback=False) @functools.wraps(func) @@ -204,11 +236,11 @@ def engine(func): # The engine interface doesn't give us any way to return # errors but to raise them into the stack context. # Save the stack context here to use when the Future has resolved. - future.add_done_callback(stack_context.wrap(final_callback)) + future_add_done_callback(future, stack_context.wrap(final_callback)) return wrapper -def coroutine(func, replace_callback=True): +def coroutine(func): """Decorator for asynchronous generators. Any generator that yields objects from this module must be wrapped @@ -229,9 +261,6 @@ def coroutine(func, replace_callback=True): ``callback`` argument is not visible inside the decorated function; it is handled by the decorator itself. - From the caller's perspective, ``@gen.coroutine`` is similar to - the combination of ``@return_future`` and ``@gen.engine``. - .. warning:: When exceptions occur inside a coroutine, the exception @@ -242,30 +271,14 @@ def coroutine(func, replace_callback=True): `.IOLoop.run_sync` for top-level calls, or passing the `.Future` to `.IOLoop.add_future`. + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. 
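In other words, instead of passing ``callback=``, a caller now consumes the returned ``Future`` (a sketch; ``divide`` is illustrative)::

    from __future__ import print_function

    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def divide(x, y):
        yield gen.moment       # stand-in for real asynchronous work
        raise gen.Return(x // y)

    @gen.coroutine
    def main():
        result = yield divide(6, 3)  # yield the Future instead of passing callback=
        raise gen.Return(result)

    print(IOLoop.current().run_sync(main))  # -> 2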
""" return _make_coroutine_wrapper(func, replace_callback=True) -# Ties lifetime of runners to their result futures. Github Issue #1769 -# Generators, like any object in Python, must be strong referenced -# in order to not be cleaned up by the garbage collector. When using -# coroutines, the Runner object is what strong-refs the inner -# generator. However, the only item that strong-reffed the Runner -# was the last Future that the inner generator yielded (via the -# Future's internal done_callback list). Usually this is enough, but -# it is also possible for this Future to not have any strong references -# other than other objects referenced by the Runner object (usually -# when using other callback patterns and/or weakrefs). In this -# situation, if a garbage collection ran, a cycle would be detected and -# Runner objects could be destroyed along with their inner generators -# and everything in their local scope. -# This map provides strong references to Runner objects as long as -# their result future objects also have strong references (typically -# from the parent coroutine's Runner). This keeps the coroutine's -# Runner alive. -_futures_to_runners = weakref.WeakKeyDictionary() - - def _make_coroutine_wrapper(func, replace_callback): """The inner workings of ``@gen.coroutine`` and ``@gen.engine``. @@ -281,9 +294,11 @@ def _make_coroutine_wrapper(func, replace_callback): @functools.wraps(wrapped) def wrapper(*args, **kwargs): - future = TracebackFuture() + future = _create_future() if replace_callback and 'callback' in kwargs: + warnings.warn("callback arguments are deprecated, use the returned Future instead", + DeprecationWarning, stacklevel=2) callback = kwargs.pop('callback') IOLoop.current().add_future( future, lambda future: callback(future.result())) @@ -293,8 +308,12 @@ def _make_coroutine_wrapper(func, replace_callback): except (Return, StopIteration) as e: result = _value_from_stopiteration(e) except Exception: - future.set_exc_info(sys.exc_info()) - return future + future_set_exc_info(future, sys.exc_info()) + try: + return future + finally: + # Avoid circular references + future = None else: if isinstance(result, GeneratorType): # Inline the first iteration of Runner.run. This lets us @@ -306,17 +325,26 @@ def _make_coroutine_wrapper(func, replace_callback): orig_stack_contexts = stack_context._state.contexts yielded = next(result) if stack_context._state.contexts is not orig_stack_contexts: - yielded = TracebackFuture() + yielded = _create_future() yielded.set_exception( stack_context.StackContextInconsistentError( 'stack_context inconsistency (probably caused ' 'by yield within a "with StackContext" block)')) except (StopIteration, Return) as e: - future.set_result(_value_from_stopiteration(e)) + future_set_result_unless_cancelled(future, _value_from_stopiteration(e)) except Exception: - future.set_exc_info(sys.exc_info()) + future_set_exc_info(future, sys.exc_info()) else: - _futures_to_runners[future] = Runner(result, future, yielded) + # Provide strong references to Runner objects as long + # as their result future objects also have strong + # references (typically from the parent coroutine's + # Runner). This keeps the coroutine's Runner alive. + # We do this by exploiting the public API + # add_done_callback() instead of putting a private + # attribute on the Future. + # (Github issues #1769, #2229). 
+ runner = Runner(result, future, yielded) + future.add_done_callback(lambda _: runner) yielded = None try: return future @@ -330,7 +358,7 @@ def _make_coroutine_wrapper(func, replace_callback): # used in the absence of cycles). We can avoid the # cycle by clearing the local variable after we return it. future = None - future.set_result(result) + future_set_result_unless_cancelled(future, result) return future wrapper.__wrapped__ = wrapped @@ -444,7 +472,7 @@ class WaitIterator(object): self._running_future = None for future in futures: - future.add_done_callback(self._done_callback) + future_add_done_callback(future, self._done_callback) def done(self): """Returns True if this iterator has no more results.""" @@ -460,7 +488,7 @@ class WaitIterator(object): Note that this `.Future` will not be the same object as any of the inputs. """ - self._running_future = TracebackFuture() + self._running_future = Future() if self._finished: self._return_result(self._finished.popleft()) @@ -482,9 +510,8 @@ class WaitIterator(object): self.current_future = done self.current_index = self._unfinished.pop(done) - @coroutine def __aiter__(self): - raise Return(self) + return self def __anext__(self): if self.done(): @@ -497,8 +524,13 @@ class YieldPoint(object): """Base class for objects that may be yielded from the generator. .. deprecated:: 4.0 - Use `Futures <.Future>` instead. + Use `Futures <.Future>` instead. This class and all its subclasses + will be removed in 6.0 """ + def __init__(self): + warnings.warn("YieldPoint is deprecated, use Futures instead", + DeprecationWarning) + def start(self, runner): """Called by the runner after the generator has yielded. @@ -535,9 +567,11 @@ class Callback(YieldPoint): is given it will be returned by `Wait`. .. deprecated:: 4.0 - Use `Futures <.Future>` instead. + Use `Futures <.Future>` instead. This class will be removed in 6.0. """ def __init__(self, key): + warnings.warn("gen.Callback is deprecated, use Futures instead", + DeprecationWarning) self.key = key def start(self, runner): @@ -555,9 +589,11 @@ class Wait(YieldPoint): """Returns the argument passed to the result of a previous `Callback`. .. deprecated:: 4.0 - Use `Futures <.Future>` instead. + Use `Futures <.Future>` instead. This class will be removed in 6.0. """ def __init__(self, key): + warnings.warn("gen.Wait is deprecated, use Futures instead", + DeprecationWarning) self.key = key def start(self, runner): @@ -579,9 +615,11 @@ class WaitAll(YieldPoint): `WaitAll` is equivalent to yielding a list of `Wait` objects. .. deprecated:: 4.0 - Use `Futures <.Future>` instead. + Use `Futures <.Future>` instead. This class will be removed in 6.0. """ def __init__(self, keys): + warnings.warn("gen.WaitAll is deprecated, use gen.multi instead", + DeprecationWarning) self.keys = keys def start(self, runner): @@ -605,33 +643,43 @@ def Task(func, *args, **kwargs): ``gen.Task`` is now a function that returns a `.Future`, instead of a subclass of `YieldPoint`. It still behaves the same way when yielded. + + .. deprecated:: 5.1 + This function is deprecated and will be removed in 6.0. 
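A sketch of the adaptation ``gen.Task`` performs (``legacy_add`` is an illustrative callback-style function, not a Tornado API)::

    from __future__ import print_function

    from tornado import gen
    from tornado.ioloop import IOLoop

    def legacy_add(a, b, callback):
        # old-style API: delivers its result through a callback argument
        IOLoop.current().add_callback(callback, a + b)

    @gen.coroutine
    def main():
        total = yield gen.Task(legacy_add, 1, 2)  # emits a DeprecationWarning on 5.1
        raise gen.Return(total)

    print(IOLoop.current().run_sync(main))  # -> 3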
""" - future = Future() + warnings.warn("gen.Task is deprecated, use Futures instead", + DeprecationWarning) + future = _create_future() def handle_exception(typ, value, tb): if future.done(): return False - future.set_exc_info((typ, value, tb)) + future_set_exc_info(future, (typ, value, tb)) return True def set_result(result): if future.done(): return - future.set_result(result) + future_set_result_unless_cancelled(future, result) with stack_context.ExceptionStackContext(handle_exception): func(*args, callback=_argument_adapter(set_result), **kwargs) return future class YieldFuture(YieldPoint): - def __init__(self, future, io_loop=None): + def __init__(self, future): """Adapts a `.Future` to the `YieldPoint` interface. - .. versionchanged:: 4.1 - The ``io_loop`` argument is deprecated. + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + + .. deprecated:: 5.1 + This class will be removed in 6.0. """ + warnings.warn("YieldFuture is deprecated, use Futures instead", + DeprecationWarning) self.future = future - self.io_loop = io_loop or IOLoop.current() + self.io_loop = IOLoop.current() def start(self, runner): if not self.future.done(): @@ -704,6 +752,10 @@ def multi(children, quiet_exceptions=()): This function is available under the names ``multi()`` and ``Multi()`` for historical reasons. + Cancelling a `.Future` returned by ``multi()`` does not cancel its + children. `asyncio.gather` is similar to ``multi()``, but it does + cancel its children. + .. versionchanged:: 4.2 If multiple yieldables fail, any exceptions after the first (which is raised) will be logged. Added the ``quiet_exceptions`` @@ -741,9 +793,11 @@ class MultiYieldPoint(YieldPoint): remains as an alias for the equivalent `multi` function. .. deprecated:: 4.3 - Use `multi` instead. + Use `multi` instead. This class will be removed in 6.0. 
""" def __init__(self, children, quiet_exceptions=()): + warnings.warn("MultiYieldPoint is deprecated, use Futures instead", + DeprecationWarning) self.keys = None if isinstance(children, dict): self.keys = list(children.keys()) @@ -812,12 +866,13 @@ def multi_future(children, quiet_exceptions=()): else: keys = None children = list(map(convert_yielded, children)) - assert all(is_future(i) for i in children) + assert all(is_future(i) or isinstance(i, _NullFuture) for i in children) unfinished_children = set(children) - future = Future() + future = _create_future() if not children: - future.set_result({} if keys is not None else []) + future_set_result_unless_cancelled(future, + {} if keys is not None else []) def callback(f): unfinished_children.remove(f) @@ -832,18 +887,19 @@ def multi_future(children, quiet_exceptions=()): app_log.error("Multiple exceptions in yield list", exc_info=True) else: - future.set_exc_info(sys.exc_info()) + future_set_exc_info(future, sys.exc_info()) if not future.done(): if keys is not None: - future.set_result(dict(zip(keys, result_list))) + future_set_result_unless_cancelled(future, + dict(zip(keys, result_list))) else: - future.set_result(result_list) + future_set_result_unless_cancelled(future, result_list) listening = set() for f in children: if f not in listening: listening.add(f) - f.add_done_callback(callback) + future_add_done_callback(f, callback) return future @@ -863,18 +919,18 @@ def maybe_future(x): if is_future(x): return x else: - fut = Future() + fut = _create_future() fut.set_result(x) return fut -def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()): +def with_timeout(timeout, future, quiet_exceptions=()): """Wraps a `.Future` (or other yieldable object) in a timeout. - Raises `TimeoutError` if the input future does not complete before - ``timeout``, which may be specified in any form allowed by - `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time - relative to `.IOLoop.time`) + Raises `tornado.util.TimeoutError` if the input future does not + complete before ``timeout``, which may be specified in any form + allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or + an absolute time relative to `.IOLoop.time`) If the wrapped `.Future` fails after it has timed out, the exception will be logged unless it is of a type contained in ``quiet_exceptions`` @@ -882,6 +938,10 @@ def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()): Does not support `YieldPoint` subclasses. + The wrapped `.Future` is not canceled when the timeout expires, + permitting it to be reused. `asyncio.wait_for` is similar to this + function but it does cancel the wrapped `.Future` on timeout. + .. versionadded:: 4.0 .. versionchanged:: 4.1 @@ -890,6 +950,7 @@ def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()): .. versionchanged:: 4.4 Added support for yieldable objects other than `.Future`. + """ # TODO: allow YieldPoints in addition to other yieldables? # Tricky to do with stack_context semantics. @@ -900,10 +961,9 @@ def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()): # callers and B) concurrent futures can only be cancelled while they are # in the queue, so cancellation cannot reliably bound our waiting time. 
future = convert_yielded(future) - result = Future() + result = _create_future() chain_future(future, result) - if io_loop is None: - io_loop = IOLoop.current() + io_loop = IOLoop.current() def error_callback(future): try: @@ -914,17 +974,18 @@ def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()): future, exc_info=True) def timeout_callback(): - result.set_exception(TimeoutError("Timeout")) + if not result.done(): + result.set_exception(TimeoutError("Timeout")) # In case the wrapped future goes on to fail, log it. - future.add_done_callback(error_callback) + future_add_done_callback(future, error_callback) timeout_handle = io_loop.add_timeout( timeout, timeout_callback) if isinstance(future, Future): # We know this future will resolve on the IOLoop, so we don't # need the extra thread-safety of IOLoop.add_future (and we also # don't care about StackContext here. - future.add_done_callback( - lambda future: io_loop.remove_timeout(timeout_handle)) + future_add_done_callback( + future, lambda future: io_loop.remove_timeout(timeout_handle)) else: # concurrent.futures.Futures may resolve on any thread, so we # need to route them back to the IOLoop. @@ -947,15 +1008,31 @@ def sleep(duration): .. versionadded:: 4.1 """ - f = Future() - IOLoop.current().call_later(duration, lambda: f.set_result(None)) + f = _create_future() + IOLoop.current().call_later(duration, + lambda: future_set_result_unless_cancelled(f, None)) return f -_null_future = Future() -_null_future.set_result(None) +class _NullFuture(object): + """_NullFuture resembles a Future that finished with a result of None. -moment = Future() + It's not actually a `Future` to avoid depending on a particular event loop. + Handled as a special case in the coroutine runner. + """ + def result(self): + return None + + def done(self): + return True + + +# _null_future is used as a dummy value in the coroutine runner. It differs +# from moment in that moment always adds a delay of one IOLoop iteration +# while _null_future is processed as soon as possible. +_null_future = _NullFuture() + +moment = _NullFuture() moment.__doc__ = \ """A special object which may be yielded to allow the IOLoop to run for one iteration. @@ -968,9 +1045,9 @@ Usage: ``yield gen.moment`` .. versionadded:: 4.0 .. deprecated:: 4.5 - ``yield None`` is now equivalent to ``yield gen.moment``. + ``yield None`` (or ``yield`` with no argument) is now equivalent to + ``yield gen.moment``. """ -moment.set_result(None) class Runner(object): @@ -979,7 +1056,7 @@ class Runner(object): Maintains information about pending callbacks and their results. 
The results of the generator are stored in ``result_future`` (a - `.TracebackFuture`) + `.Future`) """ def __init__(self, gen, result_future, first_yielded): self.gen = gen @@ -1023,9 +1100,10 @@ class Runner(object): self.results[key] = result if self.yield_point is not None and self.yield_point.is_ready(): try: - self.future.set_result(self.yield_point.get_result()) + future_set_result_unless_cancelled(self.future, + self.yield_point.get_result()) except: - self.future.set_exc_info(sys.exc_info()) + future_set_exc_info(self.future, sys.exc_info()) self.yield_point = None self.run() @@ -1084,14 +1162,15 @@ class Runner(object): raise LeakedCallbackError( "finished without waiting for callbacks %r" % self.pending_callbacks) - self.result_future.set_result(_value_from_stopiteration(e)) + future_set_result_unless_cancelled(self.result_future, + _value_from_stopiteration(e)) self.result_future = None self._deactivate_stack_context() return except Exception: self.finished = True self.future = _null_future - self.result_future.set_exc_info(sys.exc_info()) + future_set_exc_info(self.result_future, sys.exc_info()) self.result_future = None self._deactivate_stack_context() return @@ -1110,19 +1189,18 @@ class Runner(object): if isinstance(yielded, YieldPoint): # YieldPoints are too closely coupled to the Runner to go # through the generic convert_yielded mechanism. - self.future = TracebackFuture() + self.future = Future() def start_yield_point(): try: yielded.start(self) if yielded.is_ready(): - self.future.set_result( - yielded.get_result()) + future_set_result_unless_cancelled(self.future, yielded.get_result()) else: self.yield_point = yielded except Exception: - self.future = TracebackFuture() - self.future.set_exc_info(sys.exc_info()) + self.future = Future() + future_set_exc_info(self.future, sys.exc_info()) if self.stack_context_deactivate is None: # Start a stack context if this is the first @@ -1142,13 +1220,16 @@ class Runner(object): try: self.future = convert_yielded(yielded) except BadYieldError: - self.future = TracebackFuture() - self.future.set_exc_info(sys.exc_info()) + self.future = Future() + future_set_exc_info(self.future, sys.exc_info()) - if not self.future.done() or self.future is moment: + if self.future is moment: + self.io_loop.add_callback(self.run) + return False + elif not self.future.done(): def inner(f): # Break a reference cycle to speed GC. - f = None # noqa + f = None # noqa self.run() self.io_loop.add_future( self.future, inner) @@ -1161,8 +1242,8 @@ class Runner(object): def handle_exception(self, typ, value, tb): if not self.running and not self.finished: - self.future = TracebackFuture() - self.future.set_exc_info((typ, value, tb)) + self.future = Future() + future_set_exc_info(self.future, (typ, value, tb)) self.run() return True else: @@ -1194,20 +1275,10 @@ def _argument_adapter(callback): return wrapper -# Convert Awaitables into Futures. It is unfortunately possible -# to have infinite recursion here if those Awaitables assume that -# we're using a different coroutine runner and yield objects -# we don't understand. If that happens, the solution is to -# register that runner's yieldable objects with convert_yielded. -if sys.version_info >= (3, 3): - exec(textwrap.dedent(""" - @coroutine - def _wrap_awaitable(x): - if hasattr(x, '__await__'): - x = x.__await__() - return (yield from x) - """)) -else: +# Convert Awaitables into Futures. +try: + import asyncio +except ImportError: # Py2-compatible version for use with Cython. # Copied from PEP 380. 
@coroutine @@ -1254,6 +1325,13 @@ else: _r = _value_from_stopiteration(_e) break raise Return(_r) +else: + try: + _wrap_awaitable = asyncio.ensure_future + except AttributeError: + # asyncio.ensure_future was introduced in Python 3.4.4, but + # Debian jessie still ships with 3.4.2 so try the old name. + _wrap_awaitable = getattr(asyncio, 'async') def convert_yielded(yielded): @@ -1271,8 +1349,10 @@ def convert_yielded(yielded): .. versionadded:: 4.1 """ # Lists and dicts containing YieldPoints were handled earlier. - if yielded is None: + if yielded is None or yielded is moment: return moment + elif yielded is _null_future: + return _null_future elif isinstance(yielded, (list, dict)): return multi(yielded) elif is_future(yielded): @@ -1285,19 +1365,3 @@ def convert_yielded(yielded): if singledispatch is not None: convert_yielded = singledispatch(convert_yielded) - - try: - # If we can import t.p.asyncio, do it for its side effect - # (registering asyncio.Future with convert_yielded). - # It's ugly to do this here, but it prevents a cryptic - # infinite recursion in _wrap_awaitable. - # Note that even with this, asyncio integration is unlikely - # to work unless the application also configures AsyncIOLoop, - # but at least the error messages in that case are more - # comprehensible than a stack overflow. - import tornado.platform.asyncio - except ImportError: - pass - else: - # Reference the imported module to make pyflakes happy. - tornado diff --git a/server/www/packages/packages-common/tornado/http1connection.py b/server/www/packages/packages-common/tornado/http1connection.py index 53744ec..af7abe7 100644 --- a/server/www/packages/packages-common/tornado/http1connection.py +++ b/server/www/packages/packages-common/tornado/http1connection.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2014 Facebook # @@ -22,8 +21,10 @@ from __future__ import absolute_import, division, print_function import re +import warnings -from tornado.concurrent import Future +from tornado.concurrent import (Future, future_add_done_callback, + future_set_result_unless_cancelled) from tornado.escape import native_str, utf8 from tornado import gen from tornado import httputil @@ -164,7 +165,6 @@ class HTTP1Connection(httputil.HTTPConnection): header_data = yield gen.with_timeout( self.stream.io_loop.time() + self.params.header_timeout, header_future, - io_loop=self.stream.io_loop, quiet_exceptions=iostream.StreamClosedError) except gen.TimeoutError: self.close() @@ -224,7 +224,7 @@ class HTTP1Connection(httputil.HTTPConnection): try: yield gen.with_timeout( self.stream.io_loop.time() + self._body_timeout, - body_future, self.stream.io_loop, + body_future, quiet_exceptions=iostream.StreamClosedError) except gen.TimeoutError: gen_log.info("Timeout reading body from %s", @@ -251,6 +251,8 @@ class HTTP1Connection(httputil.HTTPConnection): except httputil.HTTPInputError as e: gen_log.info("Malformed HTTP message from %s: %s", self.context, e) + if not self.is_client: + yield self.stream.write(b'HTTP/1.1 400 Bad Request\r\n\r\n') self.close() raise gen.Return(False) finally: @@ -276,8 +278,14 @@ class HTTP1Connection(httputil.HTTPConnection): def set_close_callback(self, callback): """Sets a callback that will be run when the connection is closed. - .. deprecated:: 4.0 - Use `.HTTPMessageDelegate.on_connection_close` instead. 
+ Note that this callback is slightly different from + `.HTTPMessageDelegate.on_connection_close`: The + `.HTTPMessageDelegate` method is called when the connection is + closed while receiving a message. This callback is used when + there is not an active delegate (for example, on the server + side this callback is used if the client closes the connection + after sending its request but before receiving the entire + response). """ self._close_callback = stack_context.wrap(callback) @@ -290,7 +298,7 @@ class HTTP1Connection(httputil.HTTPConnection): self._close_callback = None callback() if not self._finish_future.done(): - self._finish_future.set_result(None) + future_set_result_unless_cancelled(self._finish_future, None) self._clear_callbacks() def close(self): @@ -298,7 +306,7 @@ class HTTP1Connection(httputil.HTTPConnection): self.stream.close() self._clear_callbacks() if not self._finish_future.done(): - self._finish_future.set_result(None) + future_set_result_unless_cancelled(self._finish_future, None) def detach(self): """Take control of the underlying stream. @@ -312,7 +320,7 @@ class HTTP1Connection(httputil.HTTPConnection): stream = self.stream self.stream = None if not self._finish_future.done(): - self._finish_future.set_result(None) + future_set_result_unless_cancelled(self._finish_future, None) return stream def set_body_timeout(self, timeout): @@ -349,19 +357,22 @@ class HTTP1Connection(httputil.HTTPConnection): # self._request_start_line.version or # start_line.version? self._request_start_line.version == 'HTTP/1.1' and - # 304 responses have no body (not even a zero-length body), and so - # should not have either Content-Length or Transfer-Encoding. - # headers. + # 1xx, 204 and 304 responses have no body (not even a zero-length + # body), and so should not have either Content-Length or + # Transfer-Encoding headers. start_line.code not in (204, 304) and + (start_line.code < 100 or start_line.code >= 200) and # No need to chunk the output if a Content-Length is specified. 'Content-Length' not in headers and # Applications are discouraged from touching Transfer-Encoding, # but if they do, leave it alone. 'Transfer-Encoding' not in headers) + # If the connection to a 1.1 client will be closed, inform the client + if (self._request_start_line.version == 'HTTP/1.1' and self._disconnect_on_finish): + headers['Connection'] = 'close' # If a 1.0 client asked for keep-alive, add the header. if (self._request_start_line.version == 'HTTP/1.0' and - (self._request_headers.get('Connection', '').lower() == - 'keep-alive')): + self._request_headers.get('Connection', '').lower() == 'keep-alive'): headers['Connection'] = 'Keep-Alive' if self._chunking_output: headers['Transfer-Encoding'] = 'chunked' @@ -391,6 +402,8 @@ class HTTP1Connection(httputil.HTTPConnection): future.exception() else: if callback is not None: + warnings.warn("callback argument is deprecated, use returned Future instead", + DeprecationWarning) self._write_callback = stack_context.wrap(callback) else: future = self._write_future = Future() @@ -419,7 +432,7 @@ class HTTP1Connection(httputil.HTTPConnection): def write(self, chunk, callback=None): """Implements `.HTTPConnection.write`. - For backwards compatibility is is allowed but deprecated to + For backwards compatibility it is allowed but deprecated to skip `write_headers` and instead call `write()` with a pre-encoded header block.
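Using the returned ``Future``, a response can be written along these lines (a sketch only; it assumes a server-side delegate holding an ``HTTP1Connection`` as ``self.connection``)::

    from tornado import gen, httputil

    @gen.coroutine
    def finish_response(self, body):
        start_line = httputil.ResponseStartLine('HTTP/1.1', 200, 'OK')
        headers = httputil.HTTPHeaders({'Content-Length': str(len(body))})
        yield self.connection.write_headers(start_line, headers)
        yield self.connection.write(body)  # resolves when the chunk is flushed
        self.connection.finish()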
""" @@ -430,6 +443,8 @@ class HTTP1Connection(httputil.HTTPConnection): self._write_future.exception() else: if callback is not None: + warnings.warn("callback argument is deprecated, use returned Future instead", + DeprecationWarning) self._write_callback = stack_context.wrap(callback) else: future = self._write_future = Future() @@ -464,7 +479,7 @@ class HTTP1Connection(httputil.HTTPConnection): if self._pending_write is None: self._finish_request(None) else: - self._pending_write.add_done_callback(self._finish_request) + future_add_done_callback(self._pending_write, self._finish_request) def _on_write_complete(self, future): exc = future.exception() @@ -477,7 +492,7 @@ class HTTP1Connection(httputil.HTTPConnection): if self._write_future is not None: future = self._write_future self._write_future = None - future.set_result(None) + future_set_result_unless_cancelled(future, None) def _can_keep_alive(self, start_line, headers): if self.params.no_keep_alive: @@ -504,7 +519,7 @@ class HTTP1Connection(httputil.HTTPConnection): # default state for the next request. self.stream.set_nodelay(False) if not self._finish_future.done(): - self._finish_future.set_result(None) + future_set_result_unless_cancelled(self._finish_future, None) def _parse_headers(self, data): # The lstrip removes newlines that some implementations sometimes @@ -515,12 +530,7 @@ class HTTP1Connection(httputil.HTTPConnection): # RFC 7230 section allows for both CRLF and bare LF. eol = data.find("\n") start_line = data[:eol].rstrip("\r") - try: - headers = httputil.HTTPHeaders.parse(data[eol:]) - except ValueError: - # probably form split() if there was no ':' in the line - raise httputil.HTTPInputError("Malformed HTTP headers: %r" % - data[eol:100]) + headers = httputil.HTTPHeaders.parse(data[eol:]) return start_line, headers def _read_body(self, code, headers, delegate): @@ -592,6 +602,9 @@ class HTTP1Connection(httputil.HTTPConnection): chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64) chunk_len = int(chunk_len.strip(), 16) if chunk_len == 0: + crlf = yield self.stream.read_bytes(2) + if crlf != b'\r\n': + raise httputil.HTTPInputError("improperly terminated chunked request") return total_size += chunk_len if total_size > self._max_body_size: diff --git a/server/www/packages/packages-common/tornado/httpclient.py b/server/www/packages/packages-common/tornado/httpclient.py index 8436ece..5ed2ee6 100644 --- a/server/www/packages/packages-common/tornado/httpclient.py +++ b/server/www/packages/packages-common/tornado/httpclient.py @@ -42,11 +42,12 @@ from __future__ import absolute_import, division, print_function import functools import time +import warnings import weakref -from tornado.concurrent import TracebackFuture +from tornado.concurrent import Future, future_set_result_unless_cancelled from tornado.escape import utf8, native_str -from tornado import httputil, stack_context +from tornado import gen, httputil, stack_context from tornado.ioloop import IOLoop from tornado.util import Configurable @@ -54,8 +55,10 @@ from tornado.util import Configurable class HTTPClient(object): """A blocking HTTP client. - This interface is provided for convenience and testing; most applications - that are running an IOLoop will want to use `AsyncHTTPClient` instead. + This interface is provided to make it easier to share code between + synchronous and asynchronous applications. Applications that are + running an `.IOLoop` must use `AsyncHTTPClient` instead. 
+ Typical usage looks like this:: http_client = httpclient.HTTPClient() @@ -70,12 +73,26 @@ class HTTPClient(object): # Other errors are possible, such as IOError. print("Error: " + str(e)) http_client.close() + + .. versionchanged:: 5.0 + + Due to limitations in `asyncio`, it is no longer possible to + use the synchronous ``HTTPClient`` while an `.IOLoop` is running. + Use `AsyncHTTPClient` instead. + """ def __init__(self, async_client_class=None, **kwargs): + # Initialize self._closed at the beginning of the constructor + # so that an exception raised here doesn't lead to confusing + # failures in __del__. + self._closed = True self._io_loop = IOLoop(make_current=False) if async_client_class is None: async_client_class = AsyncHTTPClient - self._async_client = async_client_class(self._io_loop, **kwargs) + # Create the client while our IOLoop is "current", without + # clobbering the thread's real current IOLoop (if any). + self._async_client = self._io_loop.run_sync( + gen.coroutine(lambda: async_client_class(**kwargs))) self._closed = False def __del__(self): @@ -108,24 +125,24 @@ class AsyncHTTPClient(Configurable): Example usage:: - def handle_response(response): - if response.error: - print("Error: %s" % response.error) + async def f(): + http_client = AsyncHTTPClient() + try: + response = await http_client.fetch("http://www.google.com") + except Exception as e: + print("Error: %s" % e) else: print(response.body) - http_client = AsyncHTTPClient() - http_client.fetch("http://www.google.com/", handle_response) - The constructor for this class is magic in several respects: It actually creates an instance of an implementation-specific subclass, and instances are reused as a kind of pseudo-singleton - (one per `.IOLoop`). The keyword argument ``force_instance=True`` - can be used to suppress this singleton behavior. Unless - ``force_instance=True`` is used, no arguments other than - ``io_loop`` should be passed to the `AsyncHTTPClient` constructor. - The implementation subclass as well as arguments to its - constructor can be set with the static method `configure()` + (one per `.IOLoop`). The keyword argument ``force_instance=True`` + can be used to suppress this singleton behavior. Unless + ``force_instance=True`` is used, no arguments should be passed to + the `AsyncHTTPClient` constructor. The implementation subclass as + well as arguments to its constructor can be set with the static + method `configure()`. All `AsyncHTTPClient` implementations support a ``defaults`` keyword argument, which can be used to set default values for @@ -137,8 +154,9 @@ class AsyncHTTPClient(Configurable): client = AsyncHTTPClient(force_instance=True, defaults=dict(user_agent="MyUserAgent")) - .. versionchanged:: 4.1 - The ``io_loop`` argument is deprecated. + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed.
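The pseudo-singleton behavior described above can be sketched as follows (illustrative, not from this patch; assumes it runs on a thread with a current IOLoop):

from tornado.httpclient import AsyncHTTPClient

c1 = AsyncHTTPClient()
c2 = AsyncHTTPClient()
assert c1 is c2  # one shared instance per IOLoop

c3 = AsyncHTTPClient(force_instance=True,
                     defaults=dict(user_agent="MyUserAgent"))
assert c3 is not c1  # force_instance=True bypasses the instance cache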
+ """ @classmethod def configurable_base(cls): @@ -156,16 +174,15 @@ class AsyncHTTPClient(Configurable): setattr(cls, attr_name, weakref.WeakKeyDictionary()) return getattr(cls, attr_name) - def __new__(cls, io_loop=None, force_instance=False, **kwargs): - io_loop = io_loop or IOLoop.current() + def __new__(cls, force_instance=False, **kwargs): + io_loop = IOLoop.current() if force_instance: instance_cache = None else: instance_cache = cls._async_clients() if instance_cache is not None and io_loop in instance_cache: return instance_cache[io_loop] - instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop, - **kwargs) + instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs) # Make sure the instance knows which cache to remove itself from. # It can't simply call _async_clients() because we may be in # __new__(AsyncHTTPClient) but instance.__class__ may be @@ -175,8 +192,8 @@ class AsyncHTTPClient(Configurable): instance_cache[instance.io_loop] = instance return instance - def initialize(self, io_loop, defaults=None): - self.io_loop = io_loop + def initialize(self, defaults=None): + self.io_loop = IOLoop.current() self.defaults = dict(HTTPRequest._DEFAULTS) if defaults is not None: self.defaults.update(defaults) @@ -222,6 +239,18 @@ class AsyncHTTPClient(Configurable): In the callback interface, `HTTPError` is not automatically raised. Instead, you must check the response's ``error`` attribute or call its `~HTTPResponse.rethrow` method. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in 6.0. Use the returned `.Future` instead. + + The ``raise_error=False`` argument currently suppresses + *all* errors, encapsulating them in `HTTPResponse` objects + with a 599 response code. This will change in Tornado 6.0: + ``raise_error=False`` will only affect the `HTTPError` + raised when a non-200 response code is used. + """ if self._closed: raise RuntimeError("fetch() called on closed AsyncHTTPClient") @@ -235,8 +264,10 @@ class AsyncHTTPClient(Configurable): # where normal dicts get converted to HTTPHeaders objects. request.headers = httputil.HTTPHeaders(request.headers) request = _RequestProxy(request, self.defaults) - future = TracebackFuture() + future = Future() if callback is not None: + warnings.warn("callback arguments are deprecated, use the returned Future instead", + DeprecationWarning) callback = stack_context.wrap(callback) def handle_future(future): @@ -254,9 +285,14 @@ class AsyncHTTPClient(Configurable): def handle_response(response): if raise_error and response.error: + if isinstance(response.error, HTTPError): + response.error.response = response future.set_exception(response.error) else: - future.set_result(response) + if response.error and not response._error_is_response_code: + warnings.warn("raise_error=False will allow '%s' to be raised in the future" % + response.error, DeprecationWarning) + future_set_result_unless_cancelled(future, response) self.fetch_impl(request, handle_response) return future @@ -318,8 +354,8 @@ class HTTPRequest(object): ssl_options=None): r"""All parameters except ``url`` are optional. - :arg string url: URL to fetch - :arg string method: HTTP method, e.g. "GET" or "POST" + :arg str url: URL to fetch + :arg str method: HTTP method, e.g. 
"GET" or "POST" :arg headers: Additional HTTP headers to pass on the request :type headers: `~tornado.httputil.HTTPHeaders` or `dict` :arg body: HTTP request body as a string (byte or unicode; if unicode @@ -335,9 +371,9 @@ class HTTPRequest(object): to pass a ``Content-Length`` in the headers as otherwise chunked encoding will be used, and many servers do not support chunked encoding on requests. New in Tornado 4.0 - :arg string auth_username: Username for HTTP authentication - :arg string auth_password: Password for HTTP authentication - :arg string auth_mode: Authentication mode; default is "basic". + :arg str auth_username: Username for HTTP authentication + :arg str auth_password: Password for HTTP authentication + :arg str auth_mode: Authentication mode; default is "basic". Allowed values are implementation-defined; ``curl_httpclient`` supports "basic" and "digest"; ``simple_httpclient`` only supports "basic" @@ -350,19 +386,19 @@ class HTTPRequest(object): :arg bool follow_redirects: Should redirects be followed automatically or return the 3xx response? Default True. :arg int max_redirects: Limit for ``follow_redirects``, default 5. - :arg string user_agent: String to send as ``User-Agent`` header + :arg str user_agent: String to send as ``User-Agent`` header :arg bool decompress_response: Request a compressed response from the server and decompress it after downloading. Default is True. New in Tornado 4.0. :arg bool use_gzip: Deprecated alias for ``decompress_response`` since Tornado 4.0. - :arg string network_interface: Network interface to use for request. + :arg str network_interface: Network interface to use for request. ``curl_httpclient`` only; see note below. - :arg callable streaming_callback: If set, ``streaming_callback`` will + :arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will be run with each chunk of data as it is received, and ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in the final response. - :arg callable header_callback: If set, ``header_callback`` will + :arg collections.abc.Callable header_callback: If set, ``header_callback`` will be run with each header line as it is received (including the first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line containing only ``\r\n``. All lines include the trailing newline @@ -370,28 +406,28 @@ class HTTPRequest(object): response. This is most useful in conjunction with ``streaming_callback``, because it's the only way to get access to header data while the request is in progress. - :arg callable prepare_curl_callback: If set, will be called with + :arg collections.abc.Callable prepare_curl_callback: If set, will be called with a ``pycurl.Curl`` object to allow the application to make additional ``setopt`` calls. - :arg string proxy_host: HTTP proxy hostname. To use proxies, + :arg str proxy_host: HTTP proxy hostname. To use proxies, ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``, ``proxy_pass`` and ``proxy_auth_mode`` are optional. Proxies are currently only supported with ``curl_httpclient``. :arg int proxy_port: HTTP proxy port - :arg string proxy_username: HTTP proxy username - :arg string proxy_password: HTTP proxy password - :arg string proxy_auth_mode: HTTP proxy Authentication mode; + :arg str proxy_username: HTTP proxy username + :arg str proxy_password: HTTP proxy password + :arg str proxy_auth_mode: HTTP proxy Authentication mode; default is "basic". 
supports "basic" and "digest" :arg bool allow_nonstandard_methods: Allow unknown values for ``method`` argument? Default is False. :arg bool validate_cert: For HTTPS requests, validate the server's certificate? Default is True. - :arg string ca_certs: filename of CA certificates in PEM format, + :arg str ca_certs: filename of CA certificates in PEM format, or None to use defaults. See note below when used with ``curl_httpclient``. - :arg string client_key: Filename for client SSL key, if any. See + :arg str client_key: Filename for client SSL key, if any. See note below when used with ``curl_httpclient``. - :arg string client_cert: Filename for client SSL certificate, if any. + :arg str client_cert: Filename for client SSL certificate, if any. See note below when used with ``curl_httpclient``. :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in ``simple_httpclient`` (unsupported by ``curl_httpclient``). @@ -542,17 +578,35 @@ class HTTPResponse(object): * error: Exception object, if any - * request_time: seconds from request start to finish + * request_time: seconds from request start to finish. Includes all network + operations from DNS resolution to receiving the last byte of data. + Does not include time spent in the queue (due to the ``max_clients`` option). + If redirects were followed, only includes the final request. + + * start_time: Time at which the HTTP operation started, based on `time.time` + (not the monotonic clock used by `.IOLoop.time`). May be ``None`` if the request + timed out while in the queue. * time_info: dictionary of diagnostic timing information from the request. Available data are subject to change, but currently uses timings available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html, plus ``queue``, which is the delay (if any) introduced by waiting for a slot under `AsyncHTTPClient`'s ``max_clients`` setting. + + .. versionadded:: 5.1 + + Added the ``start_time`` attribute. + + .. versionchanged:: 5.1 + + The ``request_time`` attribute previously included time spent in the queue + for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time + is excluded in both implementations. ``request_time`` is now more accurate for + ``curl_httpclient`` because it uses a monotonic clock when available. """ def __init__(self, request, code, headers=None, buffer=None, effective_url=None, error=None, request_time=None, - time_info=None, reason=None): + time_info=None, reason=None, start_time=None): if isinstance(request, _RequestProxy): self.request = request.request else: @@ -569,14 +623,17 @@ class HTTPResponse(object): self.effective_url = request.url else: self.effective_url = effective_url + self._error_is_response_code = False if error is None: if self.code < 200 or self.code >= 300: + self._error_is_response_code = True self.error = HTTPError(self.code, message=self.reason, response=self) else: self.error = None else: self.error = error + self.start_time = start_time self.request_time = request_time self.time_info = time_info or {} @@ -599,7 +656,7 @@ class HTTPResponse(object): return "%s(%s)" % (self.__class__.__name__, args) -class HTTPError(Exception): +class HTTPClientError(Exception): """Exception thrown for an unsuccessful HTTP request. Attributes: @@ -612,12 +669,18 @@ class HTTPError(Exception): Note that if ``follow_redirects`` is False, redirects become HTTPErrors, and you can look at ``error.response.headers['Location']`` to see the destination of the redirect. + + .. 
versionchanged:: 5.1 + + Renamed from ``HTTPError`` to ``HTTPClientError`` to avoid collisions with + `tornado.web.HTTPError`. The name ``tornado.httpclient.HTTPError`` remains + as an alias. """ def __init__(self, code, message=None, response=None): self.code = code self.message = message or httputil.responses.get(code, "Unknown") self.response = response - super(HTTPError, self).__init__(code, message, response) + super(HTTPClientError, self).__init__(code, message, response) def __str__(self): return "HTTP %d: %s" % (self.code, self.message) @@ -629,6 +692,9 @@ class HTTPError(Exception): __repr__ = __str__ +HTTPError = HTTPClientError + + class _RequestProxy(object): """Combines an object with a dictionary of defaults. @@ -654,6 +720,8 @@ def main(): define("print_body", type=bool, default=True) define("follow_redirects", type=bool, default=True) define("validate_cert", type=bool, default=True) + define("proxy_host", type=str) + define("proxy_port", type=int) args = parse_command_line() client = HTTPClient() for arg in args: @@ -661,6 +729,8 @@ def main(): response = client.fetch(arg, follow_redirects=options.follow_redirects, validate_cert=options.validate_cert, + proxy_host=options.proxy_host, + proxy_port=options.proxy_port, ) except HTTPError as e: if e.response is not None: diff --git a/server/www/packages/packages-common/tornado/httpserver.py b/server/www/packages/packages-common/tornado/httpserver.py index d757be1..3498d71 100644 --- a/server/www/packages/packages-common/tornado/httpserver.py +++ b/server/www/packages/packages-common/tornado/httpserver.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -77,7 +76,7 @@ class HTTPServer(TCPServer, Configurable, ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"), os.path.join(data_dir, "mydomain.key")) - HTTPServer(applicaton, ssl_options=ssl_ctx) + HTTPServer(application, ssl_options=ssl_ctx) `HTTPServer` initialization follows one of three patterns (the initialization methods are defined on `tornado.tcpserver.TCPServer`): @@ -134,6 +133,9 @@ class HTTPServer(TCPServer, Configurable, .. versionchanged:: 4.5 Added the ``trusted_downstream`` argument. + + .. versionchanged:: 5.0 + The ``io_loop`` argument has been removed. 
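A minimal sketch of server setup after this removal (illustrative only; the handler and port are made up), showing that the server now always picks up ``IOLoop.current()``:

import tornado.httpserver
import tornado.ioloop
import tornado.web

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("Hello")

app = tornado.web.Application([(r"/", MainHandler)])
server = tornado.httpserver.HTTPServer(app, xheaders=True)
server.listen(8888)  # binds on IOLoop.current(); no io_loop= anywhere
tornado.ioloop.IOLoop.current().start()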
""" def __init__(self, *args, **kwargs): # Ignore args to __init__; real initialization belongs in @@ -143,7 +145,7 @@ class HTTPServer(TCPServer, Configurable, # completely) pass - def initialize(self, request_callback, no_keep_alive=False, io_loop=None, + def initialize(self, request_callback, no_keep_alive=False, xheaders=False, ssl_options=None, protocol=None, decompress_request=False, chunk_size=None, max_header_size=None, @@ -151,7 +153,6 @@ class HTTPServer(TCPServer, Configurable, max_body_size=None, max_buffer_size=None, trusted_downstream=None): self.request_callback = request_callback - self.no_keep_alive = no_keep_alive self.xheaders = xheaders self.protocol = protocol self.conn_params = HTTP1ConnectionParameters( @@ -162,7 +163,7 @@ class HTTPServer(TCPServer, Configurable, max_body_size=max_body_size, body_timeout=body_timeout, no_keep_alive=no_keep_alive) - TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options, + TCPServer.__init__(self, ssl_options=ssl_options, max_buffer_size=max_buffer_size, read_chunk_size=chunk_size) self._connections = set() @@ -285,6 +286,10 @@ class _HTTPRequestContext(object): proto_header = headers.get( "X-Scheme", headers.get("X-Forwarded-Proto", self.protocol)) + if proto_header: + # use only the last proto entry if there is more than one + # TODO: support trusting mutiple layers of proxied protocol + proto_header = proto_header.split(',')[-1].strip() if proto_header in ("http", "https"): self.protocol = proto_header diff --git a/server/www/packages/packages-common/tornado/httputil.py b/server/www/packages/packages-common/tornado/httputil.py index 818ea91..3961446 100644 --- a/server/www/packages/packages-common/tornado/httputil.py +++ b/server/www/packages/packages-common/tornado/httputil.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -30,10 +29,12 @@ import email.utils import numbers import re import time +import unicodedata +import warnings from tornado.escape import native_str, parse_qs_bytes, utf8 from tornado.log import gen_log -from tornado.util import ObjectDict, PY3 +from tornado.util import ObjectDict, PY3, unicode_type if PY3: import http.cookies as Cookie @@ -61,7 +62,7 @@ except ImportError: SSLError = _SSLError # type: ignore try: - import typing + import typing # noqa: F401 except ImportError: pass @@ -184,11 +185,16 @@ class HTTPHeaders(collections.MutableMapping): """ if line[0].isspace(): # continuation of a multi-line header + if self._last_key is None: + raise HTTPInputError("first header line cannot start with whitespace") new_part = ' ' + line.lstrip() self._as_list[self._last_key][-1] += new_part self._dict[self._last_key] += new_part else: - name, value = line.split(":", 1) + try: + name, value = line.split(":", 1) + except ValueError: + raise HTTPInputError("no colon in header line") self.add(name, value.strip()) @classmethod @@ -198,6 +204,12 @@ class HTTPHeaders(collections.MutableMapping): >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n") >>> sorted(h.items()) [('Content-Length', '42'), ('Content-Type', 'text/html')] + + .. versionchanged:: 5.1 + + Raises `HTTPInputError` on malformed headers instead of a + mix of `KeyError`, and `ValueError`. + """ h = cls() for line in _CRLF_RE.split(headers): @@ -370,10 +382,15 @@ class HTTPServerRequest(object): """Returns True if this request supports HTTP/1.1 semantics. .. deprecated:: 4.0 - Applications are less likely to need this information with the - introduction of `.HTTPConnection`. 
If you still need it, access - the ``version`` attribute directly. + + Applications are less likely to need this information with + the introduction of `.HTTPConnection`. If you still need + it, access the ``version`` attribute directly. This method + will be removed in Tornado 6.0. + """ + warnings.warn("supports_http_1_1() is deprecated, use request.version instead", + DeprecationWarning) return self.version == "HTTP/1.1" @property @@ -402,8 +419,10 @@ class HTTPServerRequest(object): .. deprecated:: 4.0 Use ``request.connection`` and the `.HTTPConnection` methods - to write the response. + to write the response. This method will be removed in Tornado 6.0. """ + warnings.warn("req.write deprecated, use req.connection.write and write_headers instead", + DeprecationWarning) assert isinstance(chunk, bytes) assert self.version.startswith("HTTP/1."), \ "deprecated interface only supported in HTTP/1.x" @@ -414,8 +433,10 @@ class HTTPServerRequest(object): .. deprecated:: 4.0 Use ``request.connection`` and the `.HTTPConnection` methods - to write the response. + to write the response. This method will be removed in Tornado 6.0. """ + warnings.warn("req.finish deprecated, use req.connection.finish instead", + DeprecationWarning) self.connection.finish() self._finish_time = time.time() @@ -467,8 +488,7 @@ class HTTPServerRequest(object): def __repr__(self): attrs = ("protocol", "host", "method", "uri", "version", "remote_ip") args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs]) - return "%s(%s, headers=%s)" % ( - self.__class__.__name__, args, dict(self.headers)) + return "%s(%s)" % (self.__class__.__name__, args) class HTTPInputError(Exception): @@ -572,6 +592,11 @@ class HTTPConnection(object): The ``version`` field of ``start_line`` is ignored. Returns a `.Future` if no callback is given. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. """ raise NotImplementedError() @@ -580,6 +605,11 @@ class HTTPConnection(object): The callback will be run when the write is complete. If no callback is given, returns a Future. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. """ raise NotImplementedError() @@ -753,6 +783,11 @@ def parse_multipart_form_data(boundary, data, arguments, files): The ``boundary`` and ``data`` parameters are both byte strings. The dictionaries given in the arguments and files parameters will be updated with the contents of the body. + + .. versionchanged:: 5.1 + + Now recognizes non-ASCII filenames in RFC 2231/5987 + (``filename*=``) format. """ # The standard allows for the boundary to be quoted in the header, # although it's rare (it happens at least for google app engine @@ -829,6 +864,8 @@ def parse_request_start_line(line): try: method, path, version = line.split(" ") except ValueError: + # https://tools.ietf.org/html/rfc7230#section-3.1.1 + # invalid request-line SHOULD respond with a 400 (Bad Request) raise HTTPInputError("Malformed HTTP request line") if not re.match(r"^HTTP/1\.[0-9]$", version): raise HTTPInputError( @@ -859,7 +896,8 @@ def parse_response_start_line(line): # The original 2.7 version of this code did not correctly support some # combinations of semicolons and double quotes. # It has also been modified to support valueless parameters as seen in -# websocket extension negotiations. +# websocket extension negotiations, and to support non-ascii values in +# RFC 2231/5987 format. 
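With this patch applied, the new RFC 2231/5987 handling can be exercised against the private helper below (illustrative; ``_parse_header`` is not public API, and this mirrors the doctest added further down):

from tornado.httputil import _parse_header

ct, params = _parse_header(
    "form-data; name=\"files\"; filename*=utf-8''T%C3%A4st.txt")
print(ct)                  # form-data
print(params["filename"])  # Täst.txt, decoded from the filename*= form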
def _parseparam(s): @@ -876,25 +914,37 @@ def _parseparam(s): def _parse_header(line): - """Parse a Content-type like header. + r"""Parse a Content-type like header. Return the main content-type and a dictionary of options. + >>> d = "form-data; foo=\"b\\\\a\\\"r\"; file*=utf-8''T%C3%A4st" + >>> ct, d = _parse_header(d) + >>> ct + 'form-data' + >>> d['file'] == r'T\u00e4st'.encode('ascii').decode('unicode_escape') + True + >>> d['foo'] + 'b\\a"r' """ parts = _parseparam(';' + line) key = next(parts) - pdict = {} + # decode_params treats first argument special, but we already stripped key + params = [('Dummy', 'value')] for p in parts: i = p.find('=') if i >= 0: name = p[:i].strip().lower() value = p[i + 1:].strip() - if len(value) >= 2 and value[0] == value[-1] == '"': - value = value[1:-1] - value = value.replace('\\\\', '\\').replace('\\"', '"') - pdict[name] = value - else: - pdict[p] = None + params.append((name, native_str(value))) + params = email.utils.decode_params(params) + params.pop(0) # get rid of the dummy again + pdict = {} + for name, value in params: + value = email.utils.collapse_rfc2231_value(value) + if len(value) >= 2 and value[0] == '"' and value[-1] == '"': + value = value[1:-1] + pdict[name] = value return key, pdict @@ -918,6 +968,20 @@ def _encode_header(key, pdict): return '; '.join(out) +def encode_username_password(username, password): + """Encodes a username/password pair in the format used by HTTP auth. + + The return value is a byte string in the form ``username:password``. + + .. versionadded:: 5.1 + """ + if isinstance(username, unicode_type): + username = unicodedata.normalize('NFC', username) + if isinstance(password, unicode_type): + password = unicodedata.normalize('NFC', password) + return utf8(username) + b":" + utf8(password) + + def doctests(): import doctest return doctest.DocTestSuite() @@ -940,6 +1004,16 @@ def split_host_and_port(netloc): return (host, port) +def qs_to_qsl(qs): + """Generator converting a result of ``parse_qs`` back to name-value pairs. + + .. versionadded:: 5.0 + """ + for k, vs in qs.items(): + for v in vs: + yield (k, v) + + _OctalPatt = re.compile(r"\\[0-3][0-7][0-7]") _QuotePatt = re.compile(r"[\\].") _nulljoin = ''.join diff --git a/server/www/packages/packages-common/tornado/ioloop.py b/server/www/packages/packages-common/tornado/ioloop.py index ad35787..f7ee6dd 100644 --- a/server/www/packages/packages-common/tornado/ioloop.py +++ b/server/www/packages/packages-common/tornado/ioloop.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -16,14 +15,19 @@ """An I/O event loop for non-blocking sockets. -Typical applications will use a single `IOLoop` object, in the -`IOLoop.instance` singleton. The `IOLoop.start` method should usually -be called at the end of the ``main()`` function. Atypical applications may -use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest` -case. +On Python 3, `.IOLoop` is a wrapper around the `asyncio` event loop. + +Typical applications will use a single `IOLoop` object, accessed via +`IOLoop.current` class method. The `IOLoop.start` method (or +equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually +be called at the end of the ``main()`` function. Atypical applications +may use more than one `IOLoop`, such as one `IOLoop` per thread, or +per `unittest` case. + +In addition to I/O events, the `IOLoop` can also schedule time-based +events. `IOLoop.add_timeout` is a non-blocking alternative to +`time.sleep`. 
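A sketch of the ``add_timeout`` point above (illustrative, not from this patch); the callback fires two seconds later while the loop stays responsive instead of sleeping:

from tornado.ioloop import IOLoop

def hello():
    print("two seconds later; the loop never blocked")
    IOLoop.current().stop()

loop = IOLoop.current()
loop.add_timeout(loop.time() + 2, hello)
loop.start()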
-In addition to I/O events, the `IOLoop` can also schedule time-based events. -`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`. """ from __future__ import absolute_import, division, print_function @@ -43,40 +47,50 @@ import threading import time import traceback import math +import random -from tornado.concurrent import TracebackFuture, is_future +from tornado.concurrent import Future, is_future, chain_future, future_set_exc_info, future_add_done_callback # noqa: E501 from tornado.log import app_log, gen_log from tornado.platform.auto import set_close_exec, Waker from tornado import stack_context -from tornado.util import PY3, Configurable, errno_from_exception, timedelta_to_seconds +from tornado.util import ( + PY3, Configurable, errno_from_exception, timedelta_to_seconds, + TimeoutError, unicode_type, import_object, +) try: import signal except ImportError: signal = None +try: + from concurrent.futures import ThreadPoolExecutor +except ImportError: + ThreadPoolExecutor = None if PY3: import _thread as thread else: import thread +try: + import asyncio +except ImportError: + asyncio = None + _POLL_TIMEOUT = 3600.0 -class TimeoutError(Exception): - pass - - class IOLoop(Configurable): """A level-triggered I/O loop. - We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they - are available, or else we fall back on select(). If you are - implementing a system that needs to handle thousands of - simultaneous connections, you should use a system that supports - either ``epoll`` or ``kqueue``. + On Python 3, `IOLoop` is a wrapper around the `asyncio` event + loop. On Python 2, it uses ``epoll`` (Linux) or ``kqueue`` (BSD + and Mac OS X) if they are available, or else we fall back on + select(). If you are implementing a system that needs to handle + thousands of simultaneous connections, you should use a system + that supports either ``epoll`` or ``kqueue``. Example usage for a simple TCP server: @@ -84,9 +98,17 @@ class IOLoop(Configurable): import errno import functools - import tornado.ioloop import socket + import tornado.ioloop + from tornado import gen + from tornado.iostream import IOStream + + async def handle_connection(connection, address): + stream = IOStream(connection) + message = await stream.read_until_close() + print("message from client:", message.decode().strip()) + def connection_ready(sock, fd, events): while True: try: @@ -102,7 +124,7 @@ class IOLoop(Configurable): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setblocking(0) - sock.bind(("", port)) + sock.bind(("", 8888)) sock.listen(128) io_loop = tornado.ioloop.IOLoop.current() @@ -121,9 +143,26 @@ class IOLoop(Configurable): current instance. If ``make_current=False``, the new `IOLoop` will not try to become current. + In general, an `IOLoop` cannot survive a fork or be shared across + processes in any way. When multiple processes are being used, each + process should create its own `IOLoop`, which also implies that + any objects which depend on the `IOLoop` (such as + `.AsyncHTTPClient`) must also be created in the child processes. + As a guideline, anything that starts processes (including the + `tornado.process` and `multiprocessing` modules) should do so as + early as possible, ideally the first thing the application does + after loading its configuration in ``main()``. + .. versionchanged:: 4.2 Added the ``make_current`` keyword argument to the `IOLoop` constructor. + + .. 
versionchanged:: 5.0 + + Uses the `asyncio` event loop by default. The + ``IOLoop.configure`` method cannot be used on Python 3 except + to redundantly specify the `asyncio` event loop. + """ # Constants from the epoll module _EPOLLIN = 0x001 @@ -141,54 +180,75 @@ class IOLoop(Configurable): WRITE = _EPOLLOUT ERROR = _EPOLLERR | _EPOLLHUP - # Global lock for creating global IOLoop instance - _instance_lock = threading.Lock() - + # In Python 2, _current.instance points to the current IOLoop. _current = threading.local() + # In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops. + _ioloop_for_asyncio = dict() + + @classmethod + def configure(cls, impl, **kwargs): + if asyncio is not None: + from tornado.platform.asyncio import BaseAsyncIOLoop + + if isinstance(impl, (str, unicode_type)): + impl = import_object(impl) + if not issubclass(impl, BaseAsyncIOLoop): + raise RuntimeError( + "only AsyncIOLoop is allowed when asyncio is available") + super(IOLoop, cls).configure(impl, **kwargs) + @staticmethod def instance(): - """Returns a global `IOLoop` instance. + """Deprecated alias for `IOLoop.current()`. - Most applications have a single, global `IOLoop` running on the - main thread. Use this method to get this instance from - another thread. In most other cases, it is better to use `current()` - to get the current thread's `IOLoop`. + .. versionchanged:: 5.0 + + Previously, this method returned a global singleton + `IOLoop`, in contrast with the per-thread `IOLoop` returned + by `current()`. In nearly all cases the two were the same + (when they differed, it was generally used from non-Tornado + threads to communicate back to the main thread's `IOLoop`). + This distinction is not present in `asyncio`, so in order + to facilitate integration with that package `instance()` + was changed to be an alias to `current()`. Applications + using the cross-thread communications aspect of + `instance()` should instead set their own global variable + to point to the `IOLoop` they want to use. + + .. deprecated:: 5.0 """ - if not hasattr(IOLoop, "_instance"): - with IOLoop._instance_lock: - if not hasattr(IOLoop, "_instance"): - # New instance after double check - IOLoop._instance = IOLoop() - return IOLoop._instance - - @staticmethod - def initialized(): - """Returns true if the singleton instance has been created.""" - return hasattr(IOLoop, "_instance") + return IOLoop.current() def install(self): - """Installs this `IOLoop` object as the singleton instance. + """Deprecated alias for `make_current()`. - This is normally not necessary as `instance()` will create - an `IOLoop` on demand, but you may want to call `install` to use - a custom subclass of `IOLoop`. + .. versionchanged:: 5.0 - When using an `IOLoop` subclass, `install` must be called prior - to creating any objects that implicitly create their own - `IOLoop` (e.g., :class:`tornado.httpclient.AsyncHTTPClient`). + Previously, this method would set this `IOLoop` as the + global singleton used by `IOLoop.instance()`. Now that + `instance()` is an alias for `current()`, `install()` + is an alias for `make_current()`. + + .. deprecated:: 5.0 """ - assert not IOLoop.initialized() - IOLoop._instance = self + self.make_current() @staticmethod def clear_instance(): - """Clear the global `IOLoop` instance. + """Deprecated alias for `clear_current()`. + + .. versionchanged:: 5.0 + + Previously, this method would clear the `IOLoop` used as + the global singleton by `IOLoop.instance()`. 
Now that + `instance()` is an alias for `current()`, + `clear_instance()` is an alias for `clear_current()`. + + .. deprecated:: 5.0 - .. versionadded:: 4.0 """ - if hasattr(IOLoop, "_instance"): - del IOLoop._instance + IOLoop.clear_current() @staticmethod def current(instance=True): @@ -196,22 +256,42 @@ class IOLoop(Configurable): If an `IOLoop` is currently running or has been marked as current by `make_current`, returns that instance. If there is - no current `IOLoop`, returns `IOLoop.instance()` (i.e. the - main thread's `IOLoop`, creating one if necessary) if ``instance`` - is true. - - In general you should use `IOLoop.current` as the default when - constructing an asynchronous object, and use `IOLoop.instance` - when you mean to communicate to the main thread from a different - one. + no current `IOLoop` and ``instance`` is true, creates one. .. versionchanged:: 4.1 Added ``instance`` argument to control the fallback to `IOLoop.instance()`. + .. versionchanged:: 5.0 + On Python 3, control of the current `IOLoop` is delegated + to `asyncio`, with this and other methods as pass-through accessors. + The ``instance`` argument now controls whether an `IOLoop` + is created automatically when there is none, instead of + whether we fall back to `IOLoop.instance()` (which is now + an alias for this method). ``instance=False`` is deprecated, + since even if we do not create an `IOLoop`, this method + may initialize the asyncio loop. """ - current = getattr(IOLoop._current, "instance", None) - if current is None and instance: - return IOLoop.instance() + if asyncio is None: + current = getattr(IOLoop._current, "instance", None) + if current is None and instance: + current = IOLoop() + if IOLoop._current.instance is not current: + raise RuntimeError("new IOLoop did not become current") + else: + try: + loop = asyncio.get_event_loop() + except (RuntimeError, AssertionError): + if not instance: + return None + raise + try: + return IOLoop._ioloop_for_asyncio[loop] + except KeyError: + if instance: + from tornado.platform.asyncio import AsyncIOMainLoop + current = AsyncIOMainLoop(make_current=True) + else: + current = None return current def make_current(self): @@ -226,12 +306,38 @@ class IOLoop(Configurable): .. versionchanged:: 4.1 An `IOLoop` created while there is no current `IOLoop` will automatically become current. + + .. versionchanged:: 5.0 + This method also sets the current `asyncio` event loop. """ + # The asyncio event loops override this method. + assert asyncio is None + old = getattr(IOLoop._current, "instance", None) + if old is not None: + old.clear_current() IOLoop._current.instance = self @staticmethod def clear_current(): - IOLoop._current.instance = None + """Clears the `IOLoop` for the current thread. + + Intended primarily for use by test frameworks in between tests. + + .. versionchanged:: 5.0 + This method also clears the current `asyncio` event loop. + """ + old = IOLoop.current(instance=False) + if old is not None: + old._clear_current_hook() + if asyncio is None: + IOLoop._current.instance = None + + def _clear_current_hook(self): + """Instance method called when an IOLoop ceases to be current. + + May be overridden by subclasses as a counterpart to make_current. 
+ """ + pass @classmethod def configurable_base(cls): @@ -239,22 +345,19 @@ class IOLoop(Configurable): @classmethod def configurable_default(cls): - if hasattr(select, "epoll"): - from tornado.platform.epoll import EPollIOLoop - return EPollIOLoop - if hasattr(select, "kqueue"): - # Python 2.6+ on BSD or Mac - from tornado.platform.kqueue import KQueueIOLoop - return KQueueIOLoop - from tornado.platform.select import SelectIOLoop - return SelectIOLoop + if asyncio is not None: + from tornado.platform.asyncio import AsyncIOLoop + return AsyncIOLoop + return PollIOLoop def initialize(self, make_current=None): if make_current is None: if IOLoop.current(instance=False) is None: self.make_current() elif make_current: - if IOLoop.current(instance=False) is not None: + current = IOLoop.current(instance=False) + # AsyncIO loops can already be current by this point. + if current is not None and current is not self: raise RuntimeError("current IOLoop already exists") self.make_current() @@ -333,6 +436,12 @@ class IOLoop(Configurable): documentation for the `signal` module for more information. If ``action`` is None, the process will be killed if it is blocked for too long. + + .. deprecated:: 5.0 + + Not implemented on the `asyncio` event loop. Use the environment + variable ``PYTHONASYNCIODEBUG=1`` instead. This method will be + removed in Tornado 6.0. """ raise NotImplementedError() @@ -342,6 +451,12 @@ class IOLoop(Configurable): Equivalent to ``set_blocking_signal_threshold(seconds, self.log_stack)`` + + .. deprecated:: 5.0 + + Not implemented on the `asyncio` event loop. Use the environment + variable ``PYTHONASYNCIODEBUG=1`` instead. This method will be + removed in Tornado 6.0. """ self.set_blocking_signal_threshold(seconds, self.log_stack) @@ -349,6 +464,10 @@ class IOLoop(Configurable): """Signal handler to log the stack trace of the current thread. For use with `set_blocking_signal_threshold`. + + .. deprecated:: 5.1 + + This method will be removed in Tornado 6.0. """ gen_log.warning('IOLoop blocked for %f seconds in\n%s', self._blocking_signal_threshold, @@ -384,17 +503,6 @@ class IOLoop(Configurable): If the event loop is not currently running, the next call to `start()` will return immediately. - To use asynchronous methods from otherwise-synchronous code (such as - unit tests), you can start and stop the event loop like this:: - - ioloop = IOLoop() - async_method(ioloop=ioloop, callback=ioloop.stop) - ioloop.start() - - ``ioloop.start()`` will return after ``async_method`` has run - its callback, whether that callback was invoked before or - after ``ioloop.start``. - Note that even after `stop` has been called, the `IOLoop` is not completely stopped until `IOLoop.start` has also returned. Some work that was scheduled before the call to `stop` may still @@ -405,29 +513,32 @@ class IOLoop(Configurable): def run_sync(self, func, timeout=None): """Starts the `IOLoop`, runs the given function, and stops the loop. - The function must return either a yieldable object or - ``None``. If the function returns a yieldable object, the - `IOLoop` will run until the yieldable is resolved (and - `run_sync()` will return the yieldable's result). If it raises + The function must return either an awaitable object or + ``None``. If the function returns an awaitable object, the + `IOLoop` will run until the awaitable is resolved (and + `run_sync()` will return the awaitable's result). If it raises an exception, the `IOLoop` will stop and the exception will be re-raised to the caller. 
The keyword-only argument ``timeout`` may be used to set a maximum duration for the function. If the timeout expires, - a `TimeoutError` is raised. + a `tornado.util.TimeoutError` is raised. - This method is useful in conjunction with `tornado.gen.coroutine` - to allow asynchronous calls in a ``main()`` function:: + This method is useful to allow asynchronous calls in a + ``main()`` function:: - @gen.coroutine - def main(): + async def main(): # do stuff... if __name__ == '__main__': IOLoop.current().run_sync(main) .. versionchanged:: 4.3 - Returning a non-``None``, non-yieldable value is now an error. + Returning a non-``None``, non-awaitable value is now an error. + + .. versionchanged:: 5.0 + If a timeout occurs, the ``func`` coroutine will be cancelled. + """ future_cell = [None] @@ -438,22 +549,29 @@ class IOLoop(Configurable): from tornado.gen import convert_yielded result = convert_yielded(result) except Exception: - future_cell[0] = TracebackFuture() - future_cell[0].set_exc_info(sys.exc_info()) + future_cell[0] = Future() + future_set_exc_info(future_cell[0], sys.exc_info()) else: if is_future(result): future_cell[0] = result else: - future_cell[0] = TracebackFuture() + future_cell[0] = Future() future_cell[0].set_result(result) self.add_future(future_cell[0], lambda future: self.stop()) self.add_callback(run) if timeout is not None: - timeout_handle = self.add_timeout(self.time() + timeout, self.stop) + def timeout_callback(): + # If we can cancel the future, do so and wait on it. If not, + # just stop the loop and return with the task still pending. + # (If we neither cancel nor wait for the task, a warning + # will be logged). + if not future_cell[0].cancel(): + self.stop() + timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback) self.start() if timeout is not None: self.remove_timeout(timeout_handle) - if not future_cell[0].done(): + if future_cell[0].cancelled() or not future_cell[0].done(): raise TimeoutError('Operation timed out after %s seconds' % timeout) return future_cell[0].result() @@ -590,11 +708,46 @@ class IOLoop(Configurable): The callback is invoked with one argument, the `.Future`. + + This method only accepts `.Future` objects and not other + awaitables (unlike most of Tornado where the two are + interchangeable). """ assert is_future(future) callback = stack_context.wrap(callback) - future.add_done_callback( - lambda future: self.add_callback(callback, future)) + future_add_done_callback( + future, lambda future: self.add_callback(callback, future)) + + def run_in_executor(self, executor, func, *args): + """Runs a function in a ``concurrent.futures.Executor``. If + ``executor`` is ``None``, the IO loop's default executor will be used. + + Use `functools.partial` to pass keyword arguments to ``func``. + + .. versionadded:: 5.0 + """ + if ThreadPoolExecutor is None: + raise RuntimeError( + "concurrent.futures is required to use IOLoop.run_in_executor") + + if executor is None: + if not hasattr(self, '_executor'): + from tornado.process import cpu_count + self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5)) + executor = self._executor + c_future = executor.submit(func, *args) + # Concurrent Futures are not usable with await. Wrap this in a + # Tornado Future instead, using self.add_future for thread-safety. + t_future = Future() + self.add_future(c_future, lambda f: chain_future(f, t_future)) + return t_future + + def set_default_executor(self, executor): + """Sets the default executor to use with :meth:`run_in_executor`. + + ..
versionadded:: 5.0 + """ + self._executor = executor def _run_callback(self, callback): """Runs a callback with error handling. @@ -634,6 +787,16 @@ class IOLoop(Configurable): The exception itself is not passed explicitly, but is available in `sys.exc_info`. + + .. versionchanged:: 5.0 + + When the `asyncio` event loop is used (which is now the + default on Python 3), some callback errors will be handled by + `asyncio` instead of this method. + + .. deprecated: 5.1 + + Support for this method will be removed in Tornado 6.0. """ app_log.error("Exception in callback %r", callback, exc_info=True) @@ -701,6 +864,7 @@ class PollIOLoop(IOLoop): self._stopped = False self._closing = False self._thread_ident = None + self._pid = os.getpid() self._blocking_signal_threshold = None self._timeout_counter = itertools.count() @@ -711,6 +875,22 @@ class PollIOLoop(IOLoop): lambda fd, events: self._waker.consume(), self.READ) + @classmethod + def configurable_base(cls): + return PollIOLoop + + @classmethod + def configurable_default(cls): + if hasattr(select, "epoll"): + from tornado.platform.epoll import EPollIOLoop + return EPollIOLoop + if hasattr(select, "kqueue"): + # Python 2.6+ on BSD or Mac + from tornado.platform.kqueue import KQueueIOLoop + return KQueueIOLoop + from tornado.platform.select import SelectIOLoop + return SelectIOLoop + def close(self, all_fds=False): self._closing = True self.remove_handler(self._waker.fileno()) @@ -721,6 +901,8 @@ class PollIOLoop(IOLoop): self._impl.close() self._callbacks = None self._timeouts = None + if hasattr(self, '_executor'): + self._executor.shutdown() def add_handler(self, fd, handler, events): fd, obj = self.split_fd(fd) @@ -753,12 +935,15 @@ class PollIOLoop(IOLoop): def start(self): if self._running: raise RuntimeError("IOLoop is already running") + if os.getpid() != self._pid: + raise RuntimeError("Cannot share PollIOLoops across processes") self._setup_logging() if self._stopped: self._stopped = False return - old_current = getattr(IOLoop._current, "instance", None) - IOLoop._current.instance = self + old_current = IOLoop.current(instance=False) + if old_current is not self: + self.make_current() self._thread_ident = thread.get_ident() self._running = True @@ -901,7 +1086,10 @@ class PollIOLoop(IOLoop): self._stopped = False if self._blocking_signal_threshold is not None: signal.setitimer(signal.ITIMER_REAL, 0, 0) - IOLoop._current.instance = old_current + if old_current is None: + IOLoop.clear_current() + elif old_current is not self: + old_current.make_current() if old_wakeup_fd is not None: signal.set_wakeup_fd(old_wakeup_fd) @@ -982,25 +1170,40 @@ class PeriodicCallback(object): Note that the timeout is given in milliseconds, while most other time-related functions in Tornado use seconds. + If ``jitter`` is specified, each callback time will be randomly selected + within a window of ``jitter * callback_time`` milliseconds. + Jitter can be used to reduce alignment of events with similar periods. + A jitter of 0.1 means allowing a 10% variation in callback time. + The window is centered on ``callback_time`` so the total number of calls + within a given interval should not be significantly affected by adding + jitter. + If the callback runs for longer than ``callback_time`` milliseconds, subsequent invocations will be skipped to get back on schedule. `start` must be called after the `PeriodicCallback` is created. - .. versionchanged:: 4.1 - The ``io_loop`` argument is deprecated. + .. 
versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + + .. versionchanged:: 5.1 + The ``jitter`` argument is added. """ - def __init__(self, callback, callback_time, io_loop=None): + def __init__(self, callback, callback_time, jitter=0): self.callback = callback if callback_time <= 0: raise ValueError("Periodic callback must have a positive callback_time") self.callback_time = callback_time - self.io_loop = io_loop or IOLoop.current() + self.jitter = jitter self._running = False self._timeout = None def start(self): """Starts the timer.""" + # Looking up the IOLoop here allows to first instantiate the + # PeriodicCallback in another thread, then start it using + # IOLoop.add_callback(). + self.io_loop = IOLoop.current() self._running = True self._next_timeout = self.io_loop.time() self._schedule_next() @@ -1031,11 +1234,34 @@ class PeriodicCallback(object): def _schedule_next(self): if self._running: - current_time = self.io_loop.time() - - if self._next_timeout <= current_time: - callback_time_sec = self.callback_time / 1000.0 - self._next_timeout += (math.floor((current_time - self._next_timeout) / - callback_time_sec) + 1) * callback_time_sec - + self._update_next(self.io_loop.time()) self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run) + + def _update_next(self, current_time): + callback_time_sec = self.callback_time / 1000.0 + if self.jitter: + # apply jitter fraction + callback_time_sec *= 1 + (self.jitter * (random.random() - 0.5)) + if self._next_timeout <= current_time: + # The period should be measured from the start of one call + # to the start of the next. If one call takes too long, + # skip cycles to get back to a multiple of the original + # schedule. + self._next_timeout += (math.floor((current_time - self._next_timeout) / + callback_time_sec) + 1) * callback_time_sec + else: + # If the clock moved backwards, ensure we advance the next + # timeout instead of recomputing the same value again. + # This may result in long gaps between callbacks if the + # clock jumps backwards by a lot, but the far more common + # scenario is a small NTP adjustment that should just be + # ignored. + # + # Note that on some systems if time.time() runs slower + # than time.monotonic() (most common on windows), we + # effectively experience a small backwards time jump on + # every iteration because PeriodicCallback uses + # time.time() while asyncio schedules callbacks using + # time.monotonic(). 
+ # https://github.com/tornadoweb/tornado/issues/2333 + self._next_timeout += callback_time_sec diff --git a/server/www/packages/packages-common/tornado/iostream.py b/server/www/packages/packages-common/tornado/iostream.py index a1619c4..89e1e23 100644 --- a/server/www/packages/packages-common/tornado/iostream.py +++ b/server/www/packages/packages-common/tornado/iostream.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -28,16 +27,18 @@ from __future__ import absolute_import, division, print_function import collections import errno +import io import numbers import os import socket import sys import re +import warnings -from tornado.concurrent import TracebackFuture +from tornado.concurrent import Future from tornado import ioloop from tornado.log import gen_log, app_log -from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError, _client_ssl_defaults, _server_ssl_defaults +from tornado.netutil import ssl_wrap_socket, _client_ssl_defaults, _server_ssl_defaults from tornado import stack_context from tornado.util import errno_from_exception @@ -66,7 +67,7 @@ _ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE, errno.ETIMEDOUT) if hasattr(errno, "WSAECONNRESET"): - _ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) # type: ignore + _ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) # type: ignore # noqa: E501 if sys.platform == 'darwin': # OSX appears to have a race condition that causes send(2) to return @@ -117,6 +118,96 @@ class StreamBufferFullError(Exception): """ +class _StreamBuffer(object): + """ + A specialized buffer that tries to avoid copies when large pieces + of data are encountered. + """ + + def __init__(self): + # A sequence of (False, bytearray) and (True, memoryview) objects + self._buffers = collections.deque() + # Position in the first buffer + self._first_pos = 0 + self._size = 0 + + def __len__(self): + return self._size + + # Data above this size will be appended separately instead + # of extending an existing bytearray + _large_buf_threshold = 2048 + + def append(self, data): + """ + Append the given piece of data (should be a buffer-compatible object). + """ + size = len(data) + if size > self._large_buf_threshold: + if not isinstance(data, memoryview): + data = memoryview(data) + self._buffers.append((True, data)) + elif size > 0: + if self._buffers: + is_memview, b = self._buffers[-1] + new_buf = is_memview or len(b) >= self._large_buf_threshold + else: + new_buf = True + if new_buf: + self._buffers.append((False, bytearray(data))) + else: + b += data + + self._size += size + + def peek(self, size): + """ + Get a view over at most ``size`` bytes (possibly fewer) at the + current buffer position. + """ + assert size > 0 + try: + is_memview, b = self._buffers[0] + except IndexError: + return memoryview(b'') + + pos = self._first_pos + if is_memview: + return b[pos:pos + size] + else: + return memoryview(b)[pos:pos + size] + + def advance(self, size): + """ + Advance the current buffer position by ``size`` bytes. 
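A toy walkthrough of the append/peek/advance cycle implemented here (illustrative only; ``_StreamBuffer`` is a private class):

from tornado.iostream import _StreamBuffer

buf = _StreamBuffer()
buf.append(b"hello ")
buf.append(b"world")        # small append extends the previous bytearray
print(bytes(buf.peek(5)))   # b'hello' -- a view, no large copy
buf.advance(6)              # consume b'hello '
print(bytes(buf.peek(5)))   # b'world'
print(len(buf))             # 5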
+ """ + assert 0 < size <= self._size + self._size -= size + pos = self._first_pos + + buffers = self._buffers + while buffers and size > 0: + is_large, b = buffers[0] + b_remain = len(b) - size - pos + if b_remain <= 0: + buffers.popleft() + size -= len(b) - pos + pos = 0 + elif is_large: + pos += size + size = 0 + else: + # Amortized O(1) shrink for Python 2 + pos += size + if len(b) <= 2 * pos: + del b[:pos] + pos = 0 + size = 0 + + assert size == 0 + self._first_pos = pos + + class BaseIOStream(object): """A utility class to write to and read from a non-blocking file or socket. @@ -135,12 +226,10 @@ class BaseIOStream(object): Subclasses must implement `fileno`, `close_fd`, `write_to_fd`, `read_from_fd`, and optionally `get_fd_error`. """ - def __init__(self, io_loop=None, max_buffer_size=None, + def __init__(self, max_buffer_size=None, read_chunk_size=None, max_write_buffer_size=None): """`BaseIOStream` constructor. - :arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`. - Deprecated since Tornado 4.1. :arg max_buffer_size: Maximum amount of incoming data to buffer; defaults to 100MB. :arg read_chunk_size: Amount of data to read at one time from the @@ -151,8 +240,11 @@ class BaseIOStream(object): .. versionchanged:: 4.0 Add the ``max_write_buffer_size`` parameter. Changed default ``read_chunk_size`` to 64KB. + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been + removed. """ - self.io_loop = io_loop or ioloop.IOLoop.current() + self.io_loop = ioloop.IOLoop.current() self.max_buffer_size = max_buffer_size or 104857600 # A chunk size that is too close to max_buffer_size can cause # spurious failures. @@ -163,13 +255,11 @@ class BaseIOStream(object): self._read_buffer = bytearray() self._read_buffer_pos = 0 self._read_buffer_size = 0 - self._write_buffer = bytearray() - self._write_buffer_pos = 0 - self._write_buffer_size = 0 - self._write_buffer_frozen = False + self._user_read_buffer = False + self._after_user_read_buffer = None + self._write_buffer = _StreamBuffer() self._total_write_index = 0 self._total_write_done_index = 0 - self._pending_writes_while_frozen = [] self._read_delimiter = None self._read_regex = None self._read_max_bytes = None @@ -213,13 +303,18 @@ class BaseIOStream(object): """ raise NotImplementedError() - def read_from_fd(self): + def read_from_fd(self, buf): """Attempts to read from the underlying file. - Returns ``None`` if there was nothing to read (the socket - returned `~errno.EWOULDBLOCK` or equivalent), otherwise - returns the data. When possible, should return no more than - ``self.read_chunk_size`` bytes at a time. + Reads up to ``len(buf)`` bytes, storing them in the buffer. + Returns the number of bytes read. Returns None if there was + nothing to read (the socket returned `~errno.EWOULDBLOCK` or + equivalent), and zero on EOF. + + .. versionchanged:: 5.0 + + Interface redesigned to take a buffer and return a number + of bytes instead of a freshly-allocated object. """ raise NotImplementedError() @@ -248,6 +343,12 @@ class BaseIOStream(object): .. versionchanged:: 4.0 Added the ``max_bytes`` argument. The ``callback`` argument is now optional and a `.Future` will be returned if it is omitted. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. Use the returned `.Future` instead. 
+ """ future = self._set_read_callback(callback) self._read_regex = re.compile(regex) @@ -257,7 +358,7 @@ class BaseIOStream(object): except UnsatisfiableReadError as e: # Handle this the same way as in _handle_events. gen_log.info("Unsatisfiable read, closing connection: %s" % e) - self.close(exc_info=True) + self.close(exc_info=e) return future except: if future is not None: @@ -281,6 +382,11 @@ class BaseIOStream(object): .. versionchanged:: 4.0 Added the ``max_bytes`` argument. The ``callback`` argument is now optional and a `.Future` will be returned if it is omitted. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. Use the returned `.Future` instead. """ future = self._set_read_callback(callback) self._read_delimiter = delimiter @@ -290,7 +396,7 @@ class BaseIOStream(object): except UnsatisfiableReadError as e: # Handle this the same way as in _handle_events. gen_log.info("Unsatisfiable read, closing connection: %s" % e) - self.close(exc_info=True) + self.close(exc_info=e) return future except: if future is not None: @@ -314,12 +420,73 @@ class BaseIOStream(object): .. versionchanged:: 4.0 Added the ``partial`` argument. The callback argument is now optional and a `.Future` will be returned if it is omitted. + + .. deprecated:: 5.1 + + The ``callback`` and ``streaming_callback`` arguments are + deprecated and will be removed in Tornado 6.0. Use the + returned `.Future` (and ``partial=True`` for + ``streaming_callback``) instead. + """ future = self._set_read_callback(callback) assert isinstance(num_bytes, numbers.Integral) self._read_bytes = num_bytes self._read_partial = partial - self._streaming_callback = stack_context.wrap(streaming_callback) + if streaming_callback is not None: + warnings.warn("streaming_callback is deprecated, use partial instead", + DeprecationWarning) + self._streaming_callback = stack_context.wrap(streaming_callback) + try: + self._try_inline_read() + except: + if future is not None: + future.add_done_callback(lambda f: f.exception()) + raise + return future + + def read_into(self, buf, callback=None, partial=False): + """Asynchronously read a number of bytes. + + ``buf`` must be a writable buffer into which data will be read. + If a callback is given, it will be run with the number of read + bytes as an argument; if not, this method returns a `.Future`. + + If ``partial`` is true, the callback is run as soon as any bytes + have been read. Otherwise, it is run when the ``buf`` has been + entirely filled with read data. + + .. versionadded:: 5.0 + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. Use the returned `.Future` instead. + + """ + future = self._set_read_callback(callback) + + # First copy data already in read buffer + available_bytes = self._read_buffer_size + n = len(buf) + if available_bytes >= n: + end = self._read_buffer_pos + n + buf[:] = memoryview(self._read_buffer)[self._read_buffer_pos:end] + del self._read_buffer[:end] + self._after_user_read_buffer = self._read_buffer + elif available_bytes > 0: + buf[:available_bytes] = memoryview(self._read_buffer)[self._read_buffer_pos:] + + # Set up the supplied buffer as our temporary read buffer. + # The original (if it had any data remaining) has been + # saved for later. 
+ self._user_read_buffer = True + self._read_buffer = buf + self._read_buffer_pos = 0 + self._read_buffer_size = available_bytes + self._read_bytes = n + self._read_partial = partial + try: self._try_inline_read() except: @@ -347,9 +514,19 @@ class BaseIOStream(object): The callback argument is now optional and a `.Future` will be returned if it is omitted. + .. deprecated:: 5.1 + + The ``callback`` and ``streaming_callback`` arguments are + deprecated and will be removed in Tornado 6.0. Use the + returned `.Future` (and `read_bytes` with ``partial=True`` + for ``streaming_callback``) instead. + """ future = self._set_read_callback(callback) - self._streaming_callback = stack_context.wrap(streaming_callback) + if streaming_callback is not None: + warnings.warn("streaming_callback is deprecated, use read_bytes(partial=True) instead", + DeprecationWarning) + self._streaming_callback = stack_context.wrap(streaming_callback) if self.closed(): if self._streaming_callback is not None: self._run_read_callback(self._read_buffer_size, True) @@ -383,28 +560,32 @@ class BaseIOStream(object): .. versionchanged:: 4.5 Added support for `memoryview` arguments. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. Use the returned `.Future` instead. + """ self._check_closed() if data: if (self.max_write_buffer_size is not None and - self._write_buffer_size + len(data) > self.max_write_buffer_size): + len(self._write_buffer) + len(data) > self.max_write_buffer_size): raise StreamBufferFullError("Reached maximum write buffer size") - if self._write_buffer_frozen: - self._pending_writes_while_frozen.append(data) - else: - self._write_buffer += data - self._write_buffer_size += len(data) + self._write_buffer.append(data) self._total_write_index += len(data) if callback is not None: + warnings.warn("callback argument is deprecated, use returned Future instead", + DeprecationWarning) self._write_callback = stack_context.wrap(callback) future = None else: - future = TracebackFuture() + future = Future() future.add_done_callback(lambda f: f.exception()) self._write_futures.append((self._total_write_index, future)) if not self._connecting: self._handle_write() - if self._write_buffer_size: + if self._write_buffer: self._add_io_state(self.io_loop.WRITE) self._maybe_add_error_listener() return future @@ -412,9 +593,14 @@ class BaseIOStream(object): def set_close_callback(self, callback): """Call the given callback when the stream is closed. - This is not necessary for applications that use the `.Future` - interface; all outstanding ``Futures`` will resolve with a - `StreamClosedError` when the stream is closed. + This mostly is not necessary for applications that use the + `.Future` interface; all outstanding ``Futures`` will resolve + with a `StreamClosedError` when the stream is closed. However, + it is still useful as a way to signal that the stream has been + closed while no other read or write is in progress. + + Unlike other callback-based interfaces, ``set_close_callback`` + will not be removed in Tornado 6.0. 
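        An editor's sketch of the idle-notification case described above (``stream`` and ``on_connection_lost`` are assumed names, not part of the upstream patch)::

            def on_connection_lost():
                print("stream closed while no read or write was in progress")

            stream.set_close_callback(on_connection_lost)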
""" self._close_callback = stack_context.wrap(callback) self._maybe_add_error_listener() @@ -428,10 +614,14 @@ class BaseIOStream(object): """ if not self.closed(): if exc_info: - if not isinstance(exc_info, tuple): - exc_info = sys.exc_info() - if any(exc_info): + if isinstance(exc_info, tuple): self.error = exc_info[1] + elif isinstance(exc_info, BaseException): + self.error = exc_info + else: + exc_info = sys.exc_info() + if any(exc_info): + self.error = exc_info[1] if self._read_until_close: if (self._streaming_callback is not None and self._read_buffer_size): @@ -463,6 +653,7 @@ class BaseIOStream(object): self._ssl_connect_future = None for future in futures: future.set_exception(StreamClosedError(real_error=self.error)) + future.exception() if self._close_callback is not None: cb = self._close_callback self._close_callback = None @@ -473,7 +664,6 @@ class BaseIOStream(object): # if the IOStream object is kept alive by a reference cycle. # TODO: Clear the read buffer too; it currently breaks some tests. self._write_buffer = None - self._write_buffer_size = 0 def reading(self): """Returns true if we are currently reading from the stream.""" @@ -481,7 +671,7 @@ class BaseIOStream(object): def writing(self): """Returns true if we are currently writing to the stream.""" - return self._write_buffer_size > 0 + return bool(self._write_buffer) def closed(self): """Returns true if the stream has been closed.""" @@ -548,11 +738,11 @@ class BaseIOStream(object): self.io_loop.update_handler(self.fileno(), self._state) except UnsatisfiableReadError as e: gen_log.info("Unsatisfiable read, closing connection: %s" % e) - self.close(exc_info=True) - except Exception: + self.close(exc_info=e) + except Exception as e: gen_log.error("Uncaught exception, closing connection.", exc_info=True) - self.close(exc_info=True) + self.close(exc_info=e) raise def _run_callback(self, callback, *args): @@ -560,14 +750,14 @@ class BaseIOStream(object): self._pending_callbacks -= 1 try: return callback(*args) - except Exception: + except Exception as e: app_log.error("Uncaught exception, closing connection.", exc_info=True) # Close the socket on an uncaught exception from a user callback # (It would eventually get closed when the socket object is # gc'd, but we don't want to rely on gc happening before we # run out of file descriptors) - self.close(exc_info=True) + self.close(exc_info=e) # Re-raise the exception so that IOLoop.handle_callback_exception # can see it and log the error raise @@ -657,7 +847,7 @@ class BaseIOStream(object): raise except Exception as e: gen_log.warning("error on read: %s" % e) - self.close(exc_info=True) + self.close(exc_info=e) return if pos is not None: self._read_from_buffer(pos) @@ -669,12 +859,23 @@ class BaseIOStream(object): assert self._read_callback is None, "Already reading" assert self._read_future is None, "Already reading" if callback is not None: + warnings.warn("callbacks are deprecated, use returned Future instead", + DeprecationWarning) self._read_callback = stack_context.wrap(callback) else: - self._read_future = TracebackFuture() + self._read_future = Future() return self._read_future def _run_read_callback(self, size, streaming): + if self._user_read_buffer: + self._read_buffer = self._after_user_read_buffer or bytearray() + self._after_user_read_buffer = None + self._read_buffer_pos = 0 + self._read_buffer_size = len(self._read_buffer) + self._user_read_buffer = False + result = size + else: + result = self._consume(size) if streaming: callback = self._streaming_callback 
else: @@ -684,10 +885,11 @@ class BaseIOStream(object): assert callback is None future = self._read_future self._read_future = None - future.set_result(self._consume(size)) + + future.set_result(result) if callback is not None: assert (self._read_future is None) or streaming - self._run_callback(callback, self._consume(size)) + self._run_callback(callback, result) else: # If we scheduled a callback, we will add the error listener # afterwards. If we didn't, we have to do it now. @@ -733,31 +935,44 @@ class BaseIOStream(object): to read (i.e. the read returns EWOULDBLOCK or equivalent). On error closes the socket and raises an exception. """ - while True: - try: - chunk = self.read_from_fd() - except (socket.error, IOError, OSError) as e: - if errno_from_exception(e) == errno.EINTR: - continue - # ssl.SSLError is a subclass of socket.error - if self._is_connreset(e): - # Treat ECONNRESET as a connection close rather than - # an error to minimize log spam (the exception will - # be available on self.error for apps that care). - self.close(exc_info=True) - return - self.close(exc_info=True) - raise - break - if chunk is None: - return 0 - self._read_buffer += chunk - self._read_buffer_size += len(chunk) + try: + while True: + try: + if self._user_read_buffer: + buf = memoryview(self._read_buffer)[self._read_buffer_size:] + else: + buf = bytearray(self.read_chunk_size) + bytes_read = self.read_from_fd(buf) + except (socket.error, IOError, OSError) as e: + if errno_from_exception(e) == errno.EINTR: + continue + # ssl.SSLError is a subclass of socket.error + if self._is_connreset(e): + # Treat ECONNRESET as a connection close rather than + # an error to minimize log spam (the exception will + # be available on self.error for apps that care). + self.close(exc_info=e) + return + self.close(exc_info=e) + raise + break + if bytes_read is None: + return 0 + elif bytes_read == 0: + self.close() + return 0 + if not self._user_read_buffer: + self._read_buffer += memoryview(buf)[:bytes_read] + self._read_buffer_size += bytes_read + finally: + # Break the reference to buf so we don't waste a chunk's worth of + # memory in case an exception hangs on to our stack frame. + buf = None if self._read_buffer_size > self.max_buffer_size: gen_log.error("Reached maximum read buffer size") self.close() raise StreamBufferFullError("Reached maximum read buffer size") - return len(chunk) + return bytes_read def _run_streaming_callback(self): if self._streaming_callback is not None and self._read_buffer_size: @@ -827,56 +1042,28 @@ class BaseIOStream(object): "delimiter %r not found within %d bytes" % ( delimiter, self._read_max_bytes)) - def _freeze_write_buffer(self, size): - self._write_buffer_frozen = size - - def _unfreeze_write_buffer(self): - self._write_buffer_frozen = False - self._write_buffer += b''.join(self._pending_writes_while_frozen) - self._write_buffer_size += sum(map(len, self._pending_writes_while_frozen)) - self._pending_writes_while_frozen[:] = [] - - def _got_empty_write(self, size): - """ - Called when a non-blocking write() failed writing anything. - Can be overridden in subclasses. 
- """ - def _handle_write(self): - while self._write_buffer_size: - assert self._write_buffer_size >= 0 + while True: + size = len(self._write_buffer) + if not size: + break + assert size > 0 try: - start = self._write_buffer_pos - if self._write_buffer_frozen: - size = self._write_buffer_frozen - elif _WINDOWS: + if _WINDOWS: # On windows, socket.send blows up if given a # write buffer that's too large, instead of just # returning the number of bytes it was able to # process. Therefore we must not call socket.send # with more than 128KB at a time. size = 128 * 1024 - else: - size = self._write_buffer_size - num_bytes = self.write_to_fd( - memoryview(self._write_buffer)[start:start + size]) + + num_bytes = self.write_to_fd(self._write_buffer.peek(size)) if num_bytes == 0: - self._got_empty_write(size) break - self._write_buffer_pos += num_bytes - self._write_buffer_size -= num_bytes - # Amortized O(1) shrink - # (this heuristic is implemented natively in Python 3.4+ - # but is replicated here for Python 2) - if self._write_buffer_pos > self._write_buffer_size: - del self._write_buffer[:self._write_buffer_pos] - self._write_buffer_pos = 0 - if self._write_buffer_frozen: - self._unfreeze_write_buffer() + self._write_buffer.advance(num_bytes) self._total_write_done_index += num_bytes except (socket.error, IOError, OSError) as e: if e.args[0] in _ERRNO_WOULDBLOCK: - self._got_empty_write(size) break else: if not self._is_connreset(e): @@ -885,7 +1072,7 @@ class BaseIOStream(object): # minimize log spam gen_log.warning("Write error on %s: %s", self.fileno(), e) - self.close(exc_info=True) + self.close(exc_info=e) return while self._write_futures: @@ -895,7 +1082,7 @@ class BaseIOStream(object): self._write_futures.popleft() future.set_result(None) - if not self._write_buffer_size: + if not len(self._write_buffer): if self._write_callback: callback = self._write_callback self._write_callback = None @@ -1003,24 +1190,23 @@ class IOStream(BaseIOStream): import tornado.iostream import socket - def send_request(): - stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n") - stream.read_until(b"\r\n\r\n", on_headers) - - def on_headers(data): + async def main(): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) + stream = tornado.iostream.IOStream(s) + await stream.connect(("friendfeed.com", 80)) + await stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n") + header_data = await stream.read_until(b"\r\n\r\n") headers = {} - for line in data.split(b"\r\n"): - parts = line.split(b":") - if len(parts) == 2: - headers[parts[0].strip()] = parts[1].strip() - stream.read_bytes(int(headers[b"Content-Length"]), on_body) - - def on_body(data): - print(data) + for line in header_data.split(b"\r\n"): + parts = line.split(b":") + if len(parts) == 2: + headers[parts[0].strip()] = parts[1].strip() + body_data = await stream.read_bytes(int(headers[b"Content-Length"])) + print(body_data) stream.close() - tornado.ioloop.IOLoop.current().stop() if __name__ == '__main__': + tornado.ioloop.IOLoop.current().run_sync(main) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) stream = tornado.iostream.IOStream(s) stream.connect(("friendfeed.com", 80), send_request) @@ -1047,21 +1233,24 @@ class IOStream(BaseIOStream): socket.SO_ERROR) return socket.error(errno, os.strerror(errno)) - def read_from_fd(self): + def read_from_fd(self, buf): try: - chunk = self.socket.recv(self.read_chunk_size) + return self.socket.recv_into(buf) except socket.error as e: if e.args[0] in _ERRNO_WOULDBLOCK: return 
None else: raise - if not chunk: - self.close() - return None - return chunk + finally: + buf = None def write_to_fd(self, data): - return self.socket.send(data) + try: + return self.socket.send(data) + finally: + # Avoid holding a reference to data, which can be a memoryview. + # See https://github.com/tornadoweb/tornado/pull/2008 + del data def connect(self, address, callback=None, server_hostname=None): """Connects the socket to a remote address without blocking. @@ -1101,13 +1290,21 @@ class IOStream(BaseIOStream): ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a suitably-configured `ssl.SSLContext` to the `SSLIOStream` constructor to disable. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. Use the returned `.Future` instead. + """ self._connecting = True if callback is not None: + warnings.warn("callback argument is deprecated, use returned Future instead", + DeprecationWarning) self._connect_callback = stack_context.wrap(callback) future = None else: - future = self._connect_future = TracebackFuture() + future = self._connect_future = Future() try: self.socket.connect(address) except socket.error as e: @@ -1123,7 +1320,7 @@ class IOStream(BaseIOStream): if future is None: gen_log.warning("Connect error on fd %s: %s", self.socket.fileno(), e) - self.close(exc_info=True) + self.close(exc_info=e) return future self._add_io_state(self.io_loop.WRITE) return future @@ -1185,9 +1382,8 @@ class IOStream(BaseIOStream): orig_close_callback = self._close_callback self._close_callback = None - future = TracebackFuture() - ssl_stream = SSLIOStream(socket, ssl_options=ssl_options, - io_loop=self.io_loop) + future = Future() + ssl_stream = SSLIOStream(socket, ssl_options=ssl_options) # Wrap the original close callback so we can fail our Future as well. # If we had an "unwrap" counterpart to this method we would need # to restore the original callback after our Future resolves @@ -1214,7 +1410,13 @@ class IOStream(BaseIOStream): return future def _handle_connect(self): - err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + try: + err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + except socket.error as e: + # Hurd doesn't allow SO_ERROR for loopback sockets because all + # errors for such sockets are reported synchronously. + if errno_from_exception(e) == errno.ENOPROTOOPT: + err = 0 if err != 0: self.error = socket.error(err, os.strerror(err)) # IOLoop implementations may vary: some of them return @@ -1291,17 +1493,6 @@ class SSLIOStream(IOStream): def writing(self): return self._handshake_writing or super(SSLIOStream, self).writing() - def _got_empty_write(self, size): - # With OpenSSL, if we couldn't write the entire buffer, - # the very same string object must be used on the - # next call to send. Therefore we suppress - # merging the write buffer after an incomplete send.
- # A cleaner solution would be to set - # SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is - # not yet accessible from python - # (http://bugs.python.org/issue8240) - self._freeze_write_buffer(size) - def _do_ssl_handshake(self): # Based on code from test_ssl.py in the python stdlib try: @@ -1317,7 +1508,7 @@ class SSLIOStream(IOStream): return elif err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN): - return self.close(exc_info=True) + return self.close(exc_info=err) elif err.args[0] == ssl.SSL_ERROR_SSL: try: peer = self.socket.getpeername() @@ -1325,7 +1516,7 @@ class SSLIOStream(IOStream): peer = '(not connected)' gen_log.warning("SSL Error on %s %s: %s", self.socket.fileno(), peer, err) - return self.close(exc_info=True) + return self.close(exc_info=err) raise except socket.error as err: # Some port scans (e.g. nmap in -sT mode) have been known @@ -1334,13 +1525,13 @@ class SSLIOStream(IOStream): # https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0 if (self._is_connreset(err) or err.args[0] in (errno.EBADF, errno.ENOTCONN)): - return self.close(exc_info=True) + return self.close(exc_info=err) raise - except AttributeError: + except AttributeError as err: # On Linux, if the connection was reset before the call to # wrap_socket, do_handshake will fail with an # AttributeError. - return self.close(exc_info=True) + return self.close(exc_info=err) else: self._ssl_accepting = False if not self._verify_cert(self.socket.getpeercert()): @@ -1378,8 +1569,8 @@ class SSLIOStream(IOStream): gen_log.warning("No SSL certificate given") return False try: - ssl_match_hostname(peercert, self._server_hostname) - except SSLCertificateError as e: + ssl.match_hostname(peercert, self._server_hostname) + except ssl.CertificateError as e: gen_log.warning("Invalid SSL certificate: %s" % e) return False else: @@ -1399,9 +1590,13 @@ class SSLIOStream(IOStream): def connect(self, address, callback=None, server_hostname=None): self._server_hostname = server_hostname - # Pass a dummy callback to super.connect(), which is slightly - # more efficient than letting it return a Future we ignore. - super(SSLIOStream, self).connect(address, callback=lambda: None) + # Ignore the result of connect(). If it fails, + # wait_for_handshake will raise an error too. This is + # necessary for the old semantics of the connect callback + # (which takes no arguments). In 6.0 this can be refactored to + # be a regular coroutine. + fut = super(SSLIOStream, self).connect(address) + fut.add_done_callback(lambda f: f.exception()) return self.wait_for_handshake(callback) def _handle_connect(self): @@ -1445,15 +1640,23 @@ class SSLIOStream(IOStream): handshake to complete). It may only be called once per stream. .. versionadded:: 4.2 + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. Use the returned `.Future` instead. + """ if (self._ssl_connect_callback is not None or self._ssl_connect_future is not None): raise RuntimeError("Already waiting") if callback is not None: + warnings.warn("callback argument is deprecated, use returned Future instead", + DeprecationWarning) self._ssl_connect_callback = stack_context.wrap(callback) future = None else: - future = self._ssl_connect_future = TracebackFuture() + future = self._ssl_connect_future = Future() if not self._ssl_accepting: self._run_ssl_connect_callback() return future @@ -1471,36 +1674,34 @@ class SSLIOStream(IOStream): # simply return 0 bytes written. 
return 0 raise + finally: + # Avoid holding a reference to data, which can be a memoryview. + # See https://github.com/tornadoweb/tornado/pull/2008 + del data - def read_from_fd(self): - if self._ssl_accepting: - # If the handshake hasn't finished yet, there can't be anything - # to read (attempting to read may or may not raise an exception - # depending on the SSL version) - return None + def read_from_fd(self, buf): try: - # SSLSocket objects have both a read() and recv() method, - # while regular sockets only have recv(). - # The recv() method blocks (at least in python 2.6) if it is - # called when there is nothing to read, so we have to use - # read() instead. - chunk = self.socket.read(self.read_chunk_size) - except ssl.SSLError as e: - # SSLError is a subclass of socket.error, so this except - # block must come first. - if e.args[0] == ssl.SSL_ERROR_WANT_READ: + if self._ssl_accepting: + # If the handshake hasn't finished yet, there can't be anything + # to read (attempting to read may or may not raise an exception + # depending on the SSL version) return None - else: - raise - except socket.error as e: - if e.args[0] in _ERRNO_WOULDBLOCK: - return None - else: - raise - if not chunk: - self.close() - return None - return chunk + try: + return self.socket.recv_into(buf) + except ssl.SSLError as e: + # SSLError is a subclass of socket.error, so this except + # block must come first. + if e.args[0] == ssl.SSL_ERROR_WANT_READ: + return None + else: + raise + except socket.error as e: + if e.args[0] in _ERRNO_WOULDBLOCK: + return None + else: + raise + finally: + buf = None def _is_connreset(self, e): if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF: @@ -1518,6 +1719,7 @@ class PipeIOStream(BaseIOStream): """ def __init__(self, fd, *args, **kwargs): self.fd = fd + self._fio = io.FileIO(self.fd, "r+") _set_nonblocking(fd) super(PipeIOStream, self).__init__(*args, **kwargs) @@ -1525,28 +1727,29 @@ return self.fd def close_fd(self): - os.close(self.fd) + self._fio.close() def write_to_fd(self, data): - return os.write(self.fd, data) - - def read_from_fd(self): try: - chunk = os.read(self.fd, self.read_chunk_size) + return os.write(self.fd, data) + finally: + # Avoid holding a reference to data, which can be a memoryview. + # See https://github.com/tornadoweb/tornado/pull/2008 + del data + + def read_from_fd(self, buf): + try: + return self._fio.readinto(buf) except (IOError, OSError) as e: - if errno_from_exception(e) in _ERRNO_WOULDBLOCK: - return None - elif errno_from_exception(e) == errno.EBADF: + if errno_from_exception(e) == errno.EBADF: # If the writing half of a pipe is closed, select will # report it as readable but reads will fail with EBADF.
- self.close(exc_info=True) + self.close(exc_info=e) return None else: raise - if not chunk: - self.close() - return None - return chunk + finally: + buf = None def doctests(): diff --git a/server/www/packages/packages-common/tornado/locale.py b/server/www/packages/packages-common/tornado/locale.py index 7dba10d..d45172f 100644 --- a/server/www/packages/packages-common/tornado/locale.py +++ b/server/www/packages/packages-common/tornado/locale.py @@ -1,5 +1,5 @@ -#!/usr/bin/env python # -*- coding: utf-8 -*- +# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/server/www/packages/packages-common/tornado/locks.py b/server/www/packages/packages-common/tornado/locks.py index 4f9ecf6..9566a45 100644 --- a/server/www/packages/packages-common/tornado/locks.py +++ b/server/www/packages/packages-common/tornado/locks.py @@ -15,9 +15,10 @@ from __future__ import absolute_import, division, print_function import collections +from concurrent.futures import CancelledError from tornado import gen, ioloop -from tornado.concurrent import Future +from tornado.concurrent import Future, future_set_result_unless_cancelled __all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock'] @@ -60,22 +61,19 @@ class Condition(_TimeoutGarbageCollector): condition = Condition() - @gen.coroutine - def waiter(): + async def waiter(): print("I'll wait right here") - yield condition.wait() # Yield a Future. + await condition.wait() print("I'm done waiting") - @gen.coroutine - def notifier(): + async def notifier(): print("About to notify") condition.notify() print("Done notifying") - @gen.coroutine - def runner(): - # Yield two Futures; wait for waiter() and notifier() to finish. - yield [waiter(), notifier()] + async def runner(): + # Wait for waiter() and notifier() in parallel + await gen.multi([waiter(), notifier()]) IOLoop.current().run_sync(runner) @@ -92,15 +90,19 @@ class Condition(_TimeoutGarbageCollector): io_loop = IOLoop.current() # Wait up to 1 second for a notification. - yield condition.wait(timeout=io_loop.time() + 1) + await condition.wait(timeout=io_loop.time() + 1) ...or a `datetime.timedelta` for a timeout relative to the current time:: # Wait up to 1 second. - yield condition.wait(timeout=datetime.timedelta(seconds=1)) + await condition.wait(timeout=datetime.timedelta(seconds=1)) - The method raises `tornado.gen.TimeoutError` if there's no notification - before the deadline. + The method returns False if there's no notification before the deadline. + + .. versionchanged:: 5.0 + Previously, waiters could be notified synchronously from within + `notify`. Now, the notification will always be received on the + next iteration of the `.IOLoop`. 
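    An editor's sketch (not part of the upstream patch) of the timeout behavior described above; the ``condition`` name is assumed, and ``wait`` resolves to ``False`` when the deadline passes without a notification::

        import datetime

        async def waiter_with_timeout():
            notified = await condition.wait(
                timeout=datetime.timedelta(seconds=1))
            if not notified:
                print("timed out waiting for a notification")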
""" def __init__(self): @@ -123,7 +125,8 @@ class Condition(_TimeoutGarbageCollector): self._waiters.append(waiter) if timeout: def on_timeout(): - waiter.set_result(False) + if not waiter.done(): + future_set_result_unless_cancelled(waiter, False) self._garbage_collect() io_loop = ioloop.IOLoop.current() timeout_handle = io_loop.add_timeout(timeout, on_timeout) @@ -141,7 +144,7 @@ class Condition(_TimeoutGarbageCollector): waiters.append(waiter) for waiter in waiters: - waiter.set_result(True) + future_set_result_unless_cancelled(waiter, True) def notify_all(self): """Wake all waiters.""" @@ -164,22 +167,19 @@ class Event(object): event = Event() - @gen.coroutine - def waiter(): + async def waiter(): print("Waiting for event") - yield event.wait() + await event.wait() print("Not waiting this time") - yield event.wait() + await event.wait() print("Done") - @gen.coroutine - def setter(): + async def setter(): print("About to set the event") event.set() - @gen.coroutine - def runner(): - yield [waiter(), setter()] + async def runner(): + await gen.multi([waiter(), setter()]) IOLoop.current().run_sync(runner) @@ -191,7 +191,8 @@ class Event(object): Done """ def __init__(self): - self._future = Future() + self._value = False + self._waiters = set() def __repr__(self): return '<%s %s>' % ( @@ -199,34 +200,48 @@ class Event(object): def is_set(self): """Return ``True`` if the internal flag is true.""" - return self._future.done() + return self._value def set(self): """Set the internal flag to ``True``. All waiters are awakened. Calling `.wait` once the flag is set will not block. """ - if not self._future.done(): - self._future.set_result(None) + if not self._value: + self._value = True + + for fut in self._waiters: + if not fut.done(): + fut.set_result(None) def clear(self): """Reset the internal flag to ``False``. Calls to `.wait` will block until `.set` is called. """ - if self._future.done(): - self._future = Future() + self._value = False def wait(self, timeout=None): """Block until the internal flag is true. - Returns a Future, which raises `tornado.gen.TimeoutError` after a + Returns a Future, which raises `tornado.util.TimeoutError` after a timeout. """ + fut = Future() + if self._value: + fut.set_result(None) + return fut + self._waiters.add(fut) + fut.add_done_callback(lambda fut: self._waiters.remove(fut)) if timeout is None: - return self._future + return fut else: - return gen.with_timeout(timeout, self._future) + timeout_fut = gen.with_timeout(timeout, fut, quiet_exceptions=(CancelledError,)) + # This is a slightly clumsy workaround for the fact that + # gen.with_timeout doesn't cancel its futures. Cancelling + # fut will remove it from the waiters list. + timeout_fut.add_done_callback(lambda tf: fut.cancel() if not fut.done() else None) + return timeout_fut class _ReleasingContextManager(object): @@ -269,10 +284,11 @@ class Semaphore(_TimeoutGarbageCollector): # Ensure reliable doctest output: resolve Futures one at a time. 
futures_q = deque([Future() for _ in range(3)]) - @gen.coroutine - def simulator(futures): + async def simulator(futures): for f in futures: - yield gen.moment + # simulate the asynchronous passage of time + await gen.sleep(0) + await gen.sleep(0) f.set_result(None) IOLoop.current().add_callback(simulator, list(futures_q)) @@ -288,20 +304,18 @@ class Semaphore(_TimeoutGarbageCollector): sem = Semaphore(2) - @gen.coroutine - def worker(worker_id): - yield sem.acquire() + async def worker(worker_id): + await sem.acquire() try: print("Worker %d is working" % worker_id) - yield use_some_resource() + await use_some_resource() finally: print("Worker %d is done" % worker_id) sem.release() - @gen.coroutine - def runner(): + async def runner(): # Join all workers. - yield [worker(i) for i in range(3)] + await gen.multi([worker(i) for i in range(3)]) IOLoop.current().run_sync(runner) @@ -317,7 +331,18 @@ class Semaphore(_TimeoutGarbageCollector): Workers 0 and 1 are allowed to run concurrently, but worker 2 waits until the semaphore has been released once, by worker 0. - `.acquire` is a context manager, so ``worker`` could be written as:: + The semaphore can be used as an async context manager:: + + async def worker(worker_id): + async with sem: + print("Worker %d is working" % worker_id) + await use_some_resource() + + # Now the semaphore has been released. + print("Worker %d is done" % worker_id) + + For compatibility with older versions of Python, `.acquire` is a + context manager, so ``worker`` could also be written as:: @gen.coroutine def worker(worker_id): @@ -328,19 +353,9 @@ class Semaphore(_TimeoutGarbageCollector): # Now the semaphore has been released. print("Worker %d is done" % worker_id) - In Python 3.5, the semaphore itself can be used as an async context - manager:: - - async def worker(worker_id): - async with sem: - print("Worker %d is working" % worker_id) - await use_some_resource() - - # Now the semaphore has been released. - print("Worker %d is done" % worker_id) - .. versionchanged:: 4.3 Added ``async with`` support in Python 3.5. + """ def __init__(self, value=1): super(Semaphore, self).__init__() @@ -388,7 +403,8 @@ class Semaphore(_TimeoutGarbageCollector): self._waiters.append(waiter) if timeout: def on_timeout(): - waiter.set_exception(gen.TimeoutError()) + if not waiter.done(): + waiter.set_exception(gen.TimeoutError()) self._garbage_collect() io_loop = ioloop.IOLoop.current() timeout_handle = io_loop.add_timeout(timeout, on_timeout) @@ -440,26 +456,24 @@ class Lock(object): Releasing an unlocked lock raises `RuntimeError`. - `acquire` supports the context manager protocol in all Python versions: + A Lock can be used as an async context manager with the ``async + with`` statement: - >>> from tornado import gen, locks + >>> from tornado import locks >>> lock = locks.Lock() >>> - >>> @gen.coroutine - ... def f(): - ... with (yield lock.acquire()): + >>> async def f(): + ... async with lock: ... # Do something holding the lock. ... pass ... ... # Now the lock is released. - In Python 3.5, `Lock` also supports the async context manager - protocol. Note that in this case there is no `acquire`, because - ``async with`` includes both the ``yield`` and the ``acquire`` - (just as it does with `threading.Lock`): + For compatibility with older versions of Python, the `.acquire` + method asynchronously returns a regular context manager: - >>> async def f(): # doctest: +SKIP - ... async with lock: + >>> async def f2(): + ... with (yield lock.acquire()): ... 
# Do something holding the lock. ... pass ... @@ -480,7 +494,7 @@ class Lock(object): def acquire(self, timeout=None): """Attempt to lock. Returns a Future. - Returns a Future, which raises `tornado.gen.TimeoutError` after a + Returns a Future, which raises `tornado.util.TimeoutError` after a timeout. """ return self._block.acquire(timeout) diff --git a/server/www/packages/packages-common/tornado/log.py b/server/www/packages/packages-common/tornado/log.py index 654afc0..cda905c 100644 --- a/server/www/packages/packages-common/tornado/log.py +++ b/server/www/packages/packages-common/tornado/log.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2012 Facebook # @@ -102,7 +101,8 @@ class LogFormatter(logging.Formatter): Added support for ``colorama``. Changed the constructor signature to be compatible with `logging.config.dictConfig`. """ - DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s' + DEFAULT_FORMAT = \ + '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s' DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S' DEFAULT_COLORS = { logging.DEBUG: 4, # Blue @@ -115,13 +115,13 @@ class LogFormatter(logging.Formatter): style='%', color=True, colors=DEFAULT_COLORS): r""" :arg bool color: Enables color support. - :arg string fmt: Log message format. + :arg str fmt: Log message format. It will be applied to the attributes dict of log records. The text between ``%(color)s`` and ``%(end_color)s`` will be colored depending on the level if color support is on. :arg dict colors: color mappings from logging level to terminal color code - :arg string datefmt: Datetime format. + :arg str datefmt: Datetime format. Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. .. versionchanged:: 3.2 @@ -177,7 +177,7 @@ class LogFormatter(logging.Formatter): # bytestrings. This is a bit of a hacky place to do this, but # it's worth it since the encoding errors that would otherwise # result are so useless (and tornado is fond of using utf8-encoded - # byte strings whereever possible). + # byte strings wherever possible). record.message = _safe_unicode(message) except Exception as e: record.message = "Bad message (%r): %r" % (e, record.__dict__) diff --git a/server/www/packages/packages-common/tornado/netutil.py b/server/www/packages/packages-common/tornado/netutil.py index c34c8c8..e63683a 100644 --- a/server/www/packages/packages-common/tornado/netutil.py +++ b/server/www/packages/packages-common/tornado/netutil.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2011 Facebook # @@ -25,6 +24,7 @@ import socket import stat from tornado.concurrent import dummy_executor, run_on_executor +from tornado import gen from tornado.ioloop import IOLoop from tornado.platform.auto import set_close_exec from tornado.util import PY3, Configurable, errno_from_exception @@ -35,54 +35,20 @@ except ImportError: # ssl is not available on Google App Engine ssl = None -try: - import certifi -except ImportError: - # certifi is optional as long as we have ssl.create_default_context. 
- if ssl is None or hasattr(ssl, 'create_default_context'): - certifi = None - else: - raise - if PY3: xrange = range -if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'): # python 3.2+ - ssl_match_hostname = ssl.match_hostname - SSLCertificateError = ssl.CertificateError -elif ssl is None: - ssl_match_hostname = SSLCertificateError = None # type: ignore -else: - import backports.ssl_match_hostname - ssl_match_hostname = backports.ssl_match_hostname.match_hostname - SSLCertificateError = backports.ssl_match_hostname.CertificateError # type: ignore - -if hasattr(ssl, 'SSLContext'): - if hasattr(ssl, 'create_default_context'): - # Python 2.7.9+, 3.4+ - # Note that the naming of ssl.Purpose is confusing; the purpose - # of a context is to authentiate the opposite side of the connection. - _client_ssl_defaults = ssl.create_default_context( - ssl.Purpose.SERVER_AUTH) - _server_ssl_defaults = ssl.create_default_context( - ssl.Purpose.CLIENT_AUTH) - else: - # Python 3.2-3.3 - _client_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23) - _client_ssl_defaults.verify_mode = ssl.CERT_REQUIRED - _client_ssl_defaults.load_verify_locations(certifi.where()) - _server_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23) - if hasattr(ssl, 'OP_NO_COMPRESSION'): - # Disable TLS compression to avoid CRIME and related attacks. - # This constant wasn't added until python 3.3. - _client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION - _server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION - -elif ssl: - # Python 2.6-2.7.8 - _client_ssl_defaults = dict(cert_reqs=ssl.CERT_REQUIRED, - ca_certs=certifi.where()) - _server_ssl_defaults = {} +if ssl is not None: + # Note that the naming of ssl.Purpose is confusing; the purpose + # of a context is to authenticate the opposite side of the connection. + _client_ssl_defaults = ssl.create_default_context( + ssl.Purpose.SERVER_AUTH) + _server_ssl_defaults = ssl.create_default_context( + ssl.Purpose.CLIENT_AUTH) + if hasattr(ssl, 'OP_NO_COMPRESSION'): + # See netutil.ssl_options_to_context + _client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION + _server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION else: # Google App Engine _client_ssl_defaults = dict(cert_reqs=None, @@ -172,7 +138,12 @@ def bind_sockets(port, address=None, family=socket.AF_UNSPEC, raise set_close_exec(sock.fileno()) if os.name != 'nt': - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + try: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + except socket.error as e: + if errno_from_exception(e) != errno.ENOPROTOOPT: + # Hurd doesn't support SO_REUSEADDR. + raise if reuse_port: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) if af == socket.AF_INET6: @@ -214,7 +185,12 @@ if hasattr(socket, 'AF_UNIX'): """ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) set_close_exec(sock.fileno()) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + try: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + except socket.error as e: + if errno_from_exception(e) != errno.ENOPROTOOPT: + # Hurd doesn't support SO_REUSEADDR + raise sock.setblocking(0) try: st = os.stat(file) @@ -232,7 +208,7 @@ if hasattr(socket, 'AF_UNIX'): return sock -def add_accept_handler(sock, callback, io_loop=None): +def add_accept_handler(sock, callback): """Adds an `.IOLoop` event handler to accept new connections on ``sock``.
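    A hedged editor's sketch of typical use, relying on the remover callable documented below (``sock`` and the handler body are assumed names)::

        def handle_connection(connection, address):
            pass  # e.g. wrap ``connection`` in an IOStream

        remover = add_accept_handler(sock, handle_connection)
        # ... later, during shutdown, stop accepting:
        remover()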
When a connection is accepted, ``callback(connection, address)`` will @@ -241,11 +217,17 @@ def add_accept_handler(sock, callback, io_loop=None): is different from the ``callback(fd, events)`` signature used for `.IOLoop` handlers. - .. versionchanged:: 4.1 - The ``io_loop`` argument is deprecated. + A callable is returned which, when called, will remove the `.IOLoop` + event handler and stop processing further incoming connections. + + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + + .. versionchanged:: 5.0 + A callable is returned (``None`` was returned before). """ - if io_loop is None: - io_loop = IOLoop.current() + io_loop = IOLoop.current() + removed = [False] def accept_handler(fd, events): # More connections may come in while we're handling callbacks; @@ -260,6 +242,9 @@ def add_accept_handler(sock, callback, io_loop=None): # heuristic for the number of connections we can reasonably # accept at once. for i in xrange(_DEFAULT_BACKLOG): + if removed[0]: + # The socket was probably closed + return try: connection, address = sock.accept() except socket.error as e: @@ -273,8 +258,15 @@ def add_accept_handler(sock, callback, io_loop=None): if errno_from_exception(e) == errno.ECONNABORTED: continue raise + set_close_exec(connection.fileno()) callback(connection, address) + + def remove_handler(): + io_loop.remove_handler(sock) + removed[0] = True + io_loop.add_handler(sock, accept_handler, IOLoop.READ) + return remove_handler def is_valid_ip(ip): @@ -310,11 +302,16 @@ class Resolver(Configurable): The implementations of this interface included with Tornado are - * `tornado.netutil.BlockingResolver` - * `tornado.netutil.ThreadedResolver` + * `tornado.netutil.DefaultExecutorResolver` + * `tornado.netutil.BlockingResolver` (deprecated) + * `tornado.netutil.ThreadedResolver` (deprecated) * `tornado.netutil.OverrideResolver` * `tornado.platform.twisted.TwistedResolver` * `tornado.platform.caresresolver.CaresResolver` + + .. versionchanged:: 5.0 + The default implementation has changed from `BlockingResolver` to + `DefaultExecutorResolver`. """ @classmethod def configurable_base(cls): @@ -322,7 +319,7 @@ class Resolver(Configurable): @classmethod def configurable_default(cls): - return BlockingResolver + return DefaultExecutorResolver def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None): """Resolves an address. @@ -341,6 +338,10 @@ class Resolver(Configurable): .. versionchanged:: 4.4 Standardized all implementations to raise `IOError`. + + .. deprecated:: 5.1 + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. """ raise NotImplementedError() @@ -353,6 +354,31 @@ class Resolver(Configurable): pass +def _resolve_addr(host, port, family=socket.AF_UNSPEC): + # On Solaris, getaddrinfo fails if the given port is not found + # in /etc/services and no socket type is given, so we must pass + # one here. The socket type used here doesn't seem to actually + # matter (we discard the one we get back in the results), + # so the addresses we return should still be usable with SOCK_DGRAM. + addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM) + results = [] + for family, socktype, proto, canonname, address in addrinfo: + results.append((family, address)) + return results + + +class DefaultExecutorResolver(Resolver): + """Resolver implementation using `.IOLoop.run_in_executor`. + + .. 
versionadded:: 5.0 + """ + @gen.coroutine + def resolve(self, host, port, family=socket.AF_UNSPEC): + result = yield IOLoop.current().run_in_executor( + None, _resolve_addr, host, port, family) + raise gen.Return(result) + + class ExecutorResolver(Resolver): """Resolver implementation using a `concurrent.futures.Executor`. @@ -363,11 +389,15 @@ class ExecutorResolver(Resolver): ``close_resolver=False``; use this if you want to reuse the same executor elsewhere. - .. versionchanged:: 4.1 - The ``io_loop`` argument is deprecated. + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + + .. deprecated:: 5.0 + The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead + of this class. """ - def initialize(self, io_loop=None, executor=None, close_executor=True): - self.io_loop = io_loop or IOLoop.current() + def initialize(self, executor=None, close_executor=True): + self.io_loop = IOLoop.current() if executor is not None: self.executor = executor self.close_executor = close_executor @@ -382,16 +412,7 @@ class ExecutorResolver(Resolver): @run_on_executor def resolve(self, host, port, family=socket.AF_UNSPEC): - # On Solaris, getaddrinfo fails if the given port is not found - # in /etc/services and no socket type is given, so we must pass - # one here. The socket type used here doesn't seem to actually - # matter (we discard the one we get back in the results), - # so the addresses we return should still be usable with SOCK_DGRAM. - addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM) - results = [] - for family, socktype, proto, canonname, address in addrinfo: - results.append((family, address)) - return results + return _resolve_addr(host, port, family) class BlockingResolver(ExecutorResolver): @@ -399,9 +420,13 @@ class BlockingResolver(ExecutorResolver): The `.IOLoop` will be blocked during the resolution, although the callback will not be run until the next `.IOLoop` iteration. + + .. deprecated:: 5.0 + The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead + of this class. """ - def initialize(self, io_loop=None): - super(BlockingResolver, self).initialize(io_loop=io_loop) + def initialize(self): + super(BlockingResolver, self).initialize() class ThreadedResolver(ExecutorResolver): @@ -419,14 +444,18 @@ class ThreadedResolver(ExecutorResolver): .. versionchanged:: 3.1 All ``ThreadedResolvers`` share a single thread pool, whose size is set by the first one to be created. + + .. deprecated:: 5.0 + The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead + of this class. """ _threadpool = None # type: ignore _threadpool_pid = None # type: int - def initialize(self, io_loop=None, num_threads=10): + def initialize(self, num_threads=10): threadpool = ThreadedResolver._create_threadpool(num_threads) super(ThreadedResolver, self).initialize( - io_loop=io_loop, executor=threadpool, close_executor=False) + executor=threadpool, close_executor=False) @classmethod def _create_threadpool(cls, num_threads): @@ -448,7 +477,21 @@ class OverrideResolver(Resolver): This can be used to make local DNS changes (e.g. for testing) without modifying system-wide settings. - The mapping can contain either host strings or host-port pairs. 
+ The mapping can be in three formats:: + + { + # Hostname to host or ip + "example.com": "127.0.1.1", + + # Host+port to host+port + ("login.example.com", 443): ("localhost", 1443), + + # Host+port+address family to host+port + ("login.example.com", 443, socket.AF_INET6): ("::1", 1443), + } + + .. versionchanged:: 5.0 + Added support for host-port-family triplets. """ def initialize(self, resolver, mapping): self.resolver = resolver @@ -457,12 +500,14 @@ class OverrideResolver(Resolver): def close(self): self.resolver.close() - def resolve(self, host, port, *args, **kwargs): - if (host, port) in self.mapping: + def resolve(self, host, port, family=socket.AF_UNSPEC, *args, **kwargs): + if (host, port, family) in self.mapping: + host, port = self.mapping[(host, port, family)] + elif (host, port) in self.mapping: host, port = self.mapping[(host, port)] elif host in self.mapping: host = self.mapping[host] - return self.resolver.resolve(host, port, *args, **kwargs) + return self.resolver.resolve(host, port, family, *args, **kwargs) # These are the keyword arguments to ssl.wrap_socket that must be translated @@ -483,11 +528,12 @@ def ssl_options_to_context(ssl_options): accepts both forms needs to upgrade to the `~ssl.SSLContext` version to use features like SNI or NPN. """ - if isinstance(ssl_options, dict): - assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options - if (not hasattr(ssl, 'SSLContext') or - isinstance(ssl_options, ssl.SSLContext)): + if isinstance(ssl_options, ssl.SSLContext): return ssl_options + assert isinstance(ssl_options, dict) + assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options + # Can't use create_default_context since this interface doesn't + # tell us client vs server. context = ssl.SSLContext( ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23)) if 'certfile' in ssl_options: @@ -500,7 +546,9 @@ def ssl_options_to_context(ssl_options): context.set_ciphers(ssl_options['ciphers']) if hasattr(ssl, 'OP_NO_COMPRESSION'): # Disable TLS compression to avoid CRIME and related attacks. - # This constant wasn't added until python 3.3. + # This constant depends on openssl version 1.0. + # TODO: Do we need to do this ourselves or can we trust + # the defaults? context.options |= ssl.OP_NO_COMPRESSION return context @@ -515,14 +563,13 @@ def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs): appropriate). """ context = ssl_options_to_context(ssl_options) - if hasattr(ssl, 'SSLContext') and isinstance(context, ssl.SSLContext): - if server_hostname is not None and getattr(ssl, 'HAS_SNI'): - # Python doesn't have server-side SNI support so we can't - # really unittest this, but it can be manually tested with - # python3.2 -m tornado.httpclient https://sni.velox.ch - return context.wrap_socket(socket, server_hostname=server_hostname, - **kwargs) - else: - return context.wrap_socket(socket, **kwargs) + if ssl.HAS_SNI: + # In python 3.4, wrap_socket only accepts the server_hostname + # argument if HAS_SNI is true. 
+ # TODO: add a unittest (python added server-side SNI support in 3.4) + # In the meantime it can be manually tested with + # python3 -m tornado.httpclient https://sni.velox.ch + return context.wrap_socket(socket, server_hostname=server_hostname, + **kwargs) else: - return ssl.wrap_socket(socket, **dict(context, **kwargs)) # type: ignore + return context.wrap_socket(socket, **kwargs) diff --git a/server/www/packages/packages-common/tornado/options.py b/server/www/packages/packages-common/tornado/options.py index 0a72cc6..0a4b965 100644 --- a/server/www/packages/packages-common/tornado/options.py +++ b/server/www/packages/packages-common/tornado/options.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -16,9 +15,19 @@ """A command line parsing module that lets modules define their own options. -Each module defines its own options which are added to the global -option namespace, e.g.:: +This module is inspired by Google's `gflags +`_. The primary difference +with libraries such as `argparse` is that a global registry is used so +that options may be defined in any module (it also enables +`tornado.log` by default). The rest of Tornado does not depend on this +module, so feel free to use `argparse` or other configuration +libraries if you prefer them. +Options must be defined with `tornado.options.define` before use, +generally at the top level of a module. The options are then +accessible as attributes of `tornado.options.options`:: + + # myapp/db.py from tornado.options import define, options define("mysql_host", default="127.0.0.1:3306", help="Main user DB") @@ -29,34 +38,36 @@ option namespace, e.g.:: db = database.Connection(options.mysql_host) ... + # myapp/server.py + from tornado.options import define, options + + define("port", default=8080, help="port to listen on") + + def start_server(): + app = make_app() + app.listen(options.port) + The ``main()`` method of your application does not need to be aware of all of the options used throughout your program; they are all automatically loaded when the modules are loaded. However, all modules that define options must have been imported before the command line is parsed. Your ``main()`` method can parse the command line or parse a config file with -either:: +either `parse_command_line` or `parse_config_file`:: - tornado.options.parse_command_line() - # or - tornado.options.parse_config_file("/etc/server.conf") + import myapp.db, myapp.server + import tornado.options -.. note: + if __name__ == '__main__': + tornado.options.parse_command_line() + # or + tornado.options.parse_config_file("/etc/server.conf") - When using tornado.options.parse_command_line or - tornado.options.parse_config_file, the only options that are set are - ones that were previously defined with tornado.options.define. +.. note:: -Command line formats are what you would expect (``--myoption=myvalue``). -Config files are just Python files. Global names become options, e.g.:: - - myoption = "myvalue" - myotheroption = "myothervalue" - -We support `datetimes `, `timedeltas -`, ints, and floats (just pass a ``type`` kwarg to -`define`). We also accept multi-value options. See the documentation for -`define()` below. + When using multiple ``parse_*`` functions, pass ``final=False`` to all + but the last one, or side effects may occur twice (in particular, + this can result in log messages being doubled). 
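   An editor's sketch of the pattern described in the note above (the config path is assumed); with ``final=False`` on the first call, parse callbacks run only once, after the second::

       tornado.options.parse_config_file("/etc/server.conf", final=False)
       tornado.options.parse_command_line()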
`tornado.options.options` is a singleton instance of `OptionParser`, and the top-level functions in this module (`define`, `parse_command_line`, etc) @@ -190,13 +201,13 @@ class OptionParser(object): multiple=False, group=None, callback=None): """Defines a new command line option. - If ``type`` is given (one of str, float, int, datetime, or timedelta) - or can be inferred from the ``default``, we parse the command line - arguments based on the given type. If ``multiple`` is True, we accept - comma-separated values, and the option value is always a list. + ``type`` can be any of `str`, `int`, `float`, `bool`, + `~datetime.datetime`, or `~datetime.timedelta`. If no ``type`` + is given but a ``default`` is, ``type`` is the type of + ``default``. Otherwise, ``type`` defaults to `str`. - For multi-value integers, we also accept the syntax ``x:y``, which - turns into ``range(x, y)`` - very useful for long integer ranges. + If ``multiple`` is True, the option value is a list of ``type`` + instead of an instance of ``type``. ``help`` and ``metavar`` are used to construct the automatically generated command line help string. The help @@ -208,9 +219,7 @@ class OptionParser(object): groups. By default, command line options are grouped by the file in which they are defined. - Command line option names must be unique globally. They can be parsed - from the command line with `parse_command_line` or parsed from a - config file with `parse_config_file`. + Command line option names must be unique globally. If a ``callback`` is given, it will be run with the new value whenever the option is changed. This can be used to combine command-line @@ -222,10 +231,12 @@ class OptionParser(object): With this definition, options in the file specified by ``--config`` will override options set earlier on the command line, but can be overridden by later flags. + """ - if name in self._options: + normalized = self._normalize_name(name) + if normalized in self._options: raise Error("Option %r already defined in %s" % - (name, self._options[name].file_name)) + (normalized, self._options[normalized].file_name)) frame = sys._getframe(0) options_file = frame.f_code.co_filename @@ -247,7 +258,6 @@ class OptionParser(object): group_name = group else: group_name = file_name - normalized = self._normalize_name(name) option = _Option(name, file_name=file_name, default=default, type=type, help=help, metavar=metavar, multiple=multiple, @@ -259,6 +269,14 @@ class OptionParser(object): """Parses all options given on the command line (defaults to `sys.argv`). + Options look like ``--option=value`` and are parsed according + to their ``type``. For boolean options, ``--option`` is + equivalent to ``--option=true`` + + If the option has ``multiple=True``, comma-separated values + are accepted. For multi-value integer options, the syntax + ``x:y`` is also accepted and equivalent to ``range(x, y)``. + Note that ``args[0]`` is ignored since it is the program name in `sys.argv`. @@ -267,6 +285,7 @@ class OptionParser(object): If ``final`` is ``False``, parse callbacks will not be run. This is useful for applications that wish to combine configurations from multiple sources. + """ if args is None: args = sys.argv @@ -299,12 +318,39 @@ class OptionParser(object): return remaining def parse_config_file(self, path, final=True): - """Parses and loads the Python config file at the given path. + """Parses and loads the config file at the given path. 
+ + The config file contains Python code that will be executed (so + it is **not safe** to use untrusted config files). Anything in + the global namespace that matches a defined option will be + used to set that option's value. + + Options may either be the specified type for the option or + strings (in which case they will be parsed the same way as in + `.parse_command_line`) + + Example (using the options defined in the top-level docs of + this module):: + + port = 80 + mysql_host = 'mydb.example.com:3306' + # Both lists and comma-separated strings are allowed for + # multiple=True. + memcache_hosts = ['cache1.example.com:11011', + 'cache2.example.com:11011'] + memcache_hosts = 'cache1.example.com:11011,cache2.example.com:11011' If ``final`` is ``False``, parse callbacks will not be run. This is useful for applications that wish to combine configurations from multiple sources. + .. note:: + + `tornado.options` is primarily a command-line library. + Config file support is provided for applications that wish + to use it, but applications that prefer config files may + wish to look at other libraries instead. + .. versionchanged:: 4.1 Config files are now always interpreted as utf-8 instead of the system default encoding. @@ -312,6 +358,10 @@ class OptionParser(object): .. versionchanged:: 4.4 The special variable ``__file__`` is available inside config files, specifying the absolute path to the config file itself. + + .. versionchanged:: 5.1 + Added the ability to set options via strings in config files. + """ config = {'__file__': os.path.abspath(path)} with open(path, 'rb') as f: @@ -319,7 +369,17 @@ class OptionParser(object): for name in config: normalized = self._normalize_name(name) if normalized in self._options: - self._options[normalized].set(config[name]) + option = self._options[normalized] + if option.multiple: + if not isinstance(config[name], (list, str)): + raise Error("Option %r is required to be a list of %s " + "or a comma-separated string" % + (option.name, option.type.__name__)) + + if type(config[name]) == str and option.type != str: + option.parse(config[name]) + else: + option.set(config[name]) if final: self.run_parse_callbacks() diff --git a/server/www/packages/packages-common/tornado/platform/asyncio.py b/server/www/packages/packages-common/tornado/platform/asyncio.py index 830ee1f..e0042e1 100644 --- a/server/www/packages/packages-common/tornado/platform/asyncio.py +++ b/server/www/packages/packages-common/tornado/platform/asyncio.py @@ -3,14 +3,14 @@ .. versionadded:: 3.2 This module integrates Tornado with the ``asyncio`` module introduced -in Python 3.4 (and available `as a separate download -`_ for Python 3.3). This makes -it possible to combine the two libraries on the same event loop. +in Python 3.4. This makes it possible to combine the two libraries on +the same event loop. -Most applications should use `AsyncIOMainLoop` to run Tornado on the -default ``asyncio`` event loop. Applications that need to run event -loops on multiple threads may use `AsyncIOLoop` to create multiple -loops. +.. deprecated:: 5.0 + + While the code in this module is still used, it is now enabled + automatically when `asyncio` is available, so applications should + no longer need to refer to this module directly. .. note:: @@ -22,35 +22,38 @@ loops. 
from __future__ import absolute_import, division, print_function import functools -import tornado.concurrent from tornado.gen import convert_yielded from tornado.ioloop import IOLoop from tornado import stack_context -try: - # Import the real asyncio module for py33+ first. Older versions of the - # trollius backport also use this name. - import asyncio # type: ignore -except ImportError as e: - # Asyncio itself isn't available; see if trollius is (backport to py26+). - try: - import trollius as asyncio # type: ignore - except ImportError: - # Re-raise the original asyncio error, not the trollius one. - raise e +import asyncio class BaseAsyncIOLoop(IOLoop): - def initialize(self, asyncio_loop, close_loop=False, **kwargs): - super(BaseAsyncIOLoop, self).initialize(**kwargs) + def initialize(self, asyncio_loop, **kwargs): self.asyncio_loop = asyncio_loop - self.close_loop = close_loop # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler) self.handlers = {} # Set of fds listening for reads/writes self.readers = set() self.writers = set() self.closing = False + # If an asyncio loop was closed through an asyncio interface + # instead of IOLoop.close(), we'd never hear about it and may + # have left a dangling reference in our map. In case an + # application (or, more likely, a test suite) creates and + # destroys a lot of event loops in this way, check here to + # ensure that we don't have a lot of dead loops building up in + # the map. + # + # TODO(bdarnell): consider making self.asyncio_loop a weakref + # for AsyncIOMainLoop and make _ioloop_for_asyncio a + # WeakKeyDictionary. + for loop in list(IOLoop._ioloop_for_asyncio): + if loop.is_closed(): + del IOLoop._ioloop_for_asyncio[loop] + IOLoop._ioloop_for_asyncio[asyncio_loop] = self + super(BaseAsyncIOLoop, self).initialize(**kwargs) def close(self, all_fds=False): self.closing = True @@ -59,8 +62,13 @@ class BaseAsyncIOLoop(IOLoop): self.remove_handler(fd) if all_fds: self.close_fd(fileobj) - if self.close_loop: - self.asyncio_loop.close() + # Remove the mapping before closing the asyncio loop. If this + # happened in the other order, we could race against another + # initialize() call which would see the closed asyncio loop, + # assume it was closed from the asyncio side, and do this + # cleanup for us, leading to a KeyError. + del IOLoop._ioloop_for_asyncio[self.asyncio_loop] + self.asyncio_loop.close() def add_handler(self, fd, handler, events): fd, fileobj = self.split_fd(fd) @@ -114,16 +122,16 @@ class BaseAsyncIOLoop(IOLoop): handler_func(fileobj, events) def start(self): - old_current = IOLoop.current(instance=False) + try: + old_loop = asyncio.get_event_loop() + except (RuntimeError, AssertionError): + old_loop = None try: self._setup_logging() - self.make_current() + asyncio.set_event_loop(self.asyncio_loop) self.asyncio_loop.run_forever() finally: - if old_current is None: - IOLoop.clear_current() - else: - old_current.make_current() + asyncio.set_event_loop(old_loop) def stop(self): self.asyncio_loop.stop() @@ -140,67 +148,110 @@ class BaseAsyncIOLoop(IOLoop): timeout.cancel() def add_callback(self, callback, *args, **kwargs): - if self.closing: - # TODO: this is racy; we need a lock to ensure that the - # loop isn't closed during call_soon_threadsafe. 
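The replacement of this guard, completed just below, makes ``add_callback`` on a closed loop a silent no-op rather than an error. A minimal sketch of the resulting behavior (assuming Tornado 5.x on Python 3)::

    from tornado.ioloop import IOLoop

    loop = IOLoop()
    loop.close()
    # Tornado 4 raised RuntimeError("IOLoop is closing") here; after this
    # change the callback is silently dropped, since execution could never
    # be guaranteed for a loop that is shutting down anyway.
    loop.add_callback(lambda: None)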
- raise RuntimeError("IOLoop is closing") - self.asyncio_loop.call_soon_threadsafe( - self._run_callback, - functools.partial(stack_context.wrap(callback), *args, **kwargs)) + try: + self.asyncio_loop.call_soon_threadsafe( + self._run_callback, + functools.partial(stack_context.wrap(callback), *args, **kwargs)) + except RuntimeError: + # "Event loop is closed". Swallow the exception for + # consistency with PollIOLoop (and logical consistency + # with the fact that we can't guarantee that an + # add_callback that completes without error will + # eventually execute). + pass add_callback_from_signal = add_callback + def run_in_executor(self, executor, func, *args): + return self.asyncio_loop.run_in_executor(executor, func, *args) + + def set_default_executor(self, executor): + return self.asyncio_loop.set_default_executor(executor) + class AsyncIOMainLoop(BaseAsyncIOLoop): """``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the current ``asyncio`` event loop (i.e. the one returned by - ``asyncio.get_event_loop()``). Recommended usage:: + ``asyncio.get_event_loop()``). - from tornado.platform.asyncio import AsyncIOMainLoop - import asyncio - AsyncIOMainLoop().install() - asyncio.get_event_loop().run_forever() + .. deprecated:: 5.0 - See also :meth:`tornado.ioloop.IOLoop.install` for general notes on - installing alternative IOLoops. + Now used automatically when appropriate; it is no longer necessary + to refer to this class directly. + + .. versionchanged:: 5.0 + + Closing an `AsyncIOMainLoop` now closes the underlying asyncio loop. """ def initialize(self, **kwargs): - super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(), - close_loop=False, **kwargs) + super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(), **kwargs) + + def make_current(self): + # AsyncIOMainLoop already refers to the current asyncio loop so + # nothing to do here. + pass class AsyncIOLoop(BaseAsyncIOLoop): """``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop. This class follows the usual Tornado semantics for creating new ``IOLoops``; these loops are not necessarily related to the - ``asyncio`` default event loop. Recommended usage:: - - from tornado.ioloop import IOLoop - IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop') - IOLoop.current().start() + ``asyncio`` default event loop. Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object can be accessed with the ``asyncio_loop`` attribute. + + .. versionchanged:: 5.0 + + When an ``AsyncIOLoop`` becomes the current `.IOLoop`, it also sets + the current `asyncio` event loop. + + .. deprecated:: 5.0 + + Now used automatically when appropriate; it is no longer necessary + to refer to this class directly. """ def initialize(self, **kwargs): + self.is_current = False loop = asyncio.new_event_loop() try: - super(AsyncIOLoop, self).initialize(loop, close_loop=True, **kwargs) + super(AsyncIOLoop, self).initialize(loop, **kwargs) except Exception: # If initialize() does not succeed (taking ownership of the loop), # we have to close it. 
            loop.close()
            raise

+    def close(self, all_fds=False):
+        if self.is_current:
+            self.clear_current()
+        super(AsyncIOLoop, self).close(all_fds=all_fds)
+
+    def make_current(self):
+        if not self.is_current:
+            try:
+                self.old_asyncio = asyncio.get_event_loop()
+            except (RuntimeError, AssertionError):
+                self.old_asyncio = None
+            self.is_current = True
+        asyncio.set_event_loop(self.asyncio_loop)
+
+    def _clear_current_hook(self):
+        if self.is_current:
+            asyncio.set_event_loop(self.old_asyncio)
+            self.is_current = False
+

 def to_tornado_future(asyncio_future):
     """Convert an `asyncio.Future` to a `tornado.concurrent.Future`.

     .. versionadded:: 4.1
+
+    .. deprecated:: 5.0
+       Tornado ``Futures`` have been merged with `asyncio.Future`,
+       so this method is now a no-op.
     """
-    tf = tornado.concurrent.Future()
-    tornado.concurrent.chain_future(asyncio_future, tf)
-    return tf
+    return asyncio_future


 def to_asyncio_future(tornado_future):
@@ -211,12 +262,38 @@ def to_asyncio_future(tornado_future):
     .. versionchanged:: 4.3
        Now accepts any yieldable object, not just
        `tornado.concurrent.Future`.
+
+    .. deprecated:: 5.0
+       Tornado ``Futures`` have been merged with `asyncio.Future`,
+       so this method is now equivalent to `tornado.gen.convert_yielded`.
     """
-    tornado_future = convert_yielded(tornado_future)
-    af = asyncio.Future()
-    tornado.concurrent.chain_future(tornado_future, af)
-    return af
+    return convert_yielded(tornado_future)


-if hasattr(convert_yielded, 'register'):
-    convert_yielded.register(asyncio.Future, to_tornado_future)  # type: ignore
+class AnyThreadEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
+    """Event loop policy that allows loop creation on any thread.
+
+    The default `asyncio` event loop policy only automatically creates
+    event loops in the main thread. Other threads must create event
+    loops explicitly or `asyncio.get_event_loop` (and therefore
+    `.IOLoop.current`) will fail. Installing this policy allows event
+    loops to be created automatically on any thread, matching the
+    behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
+
+    Usage::
+
+        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
+
+    .. versionadded:: 5.0
+
+    """
+    def get_event_loop(self):
+        try:
+            return super().get_event_loop()
+        except (RuntimeError, AssertionError):
+            # This was an AssertionError in python 3.4.2 (which ships with debian jessie)
+            # and changed to a RuntimeError in 3.4.3.
+ # "There is no current event loop in thread %r" + loop = self.new_event_loop() + self.set_event_loop(loop) + return loop diff --git a/server/www/packages/packages-common/tornado/platform/auto.py b/server/www/packages/packages-common/tornado/platform/auto.py index 1f4d700..1a9133f 100644 --- a/server/www/packages/packages-common/tornado/platform/auto.py +++ b/server/www/packages/packages-common/tornado/platform/auto.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2011 Facebook # diff --git a/server/www/packages/packages-common/tornado/platform/caresresolver.py b/server/www/packages/packages-common/tornado/platform/caresresolver.py index fd6e9d2..768cb62 100644 --- a/server/www/packages/packages-common/tornado/platform/caresresolver.py +++ b/server/www/packages/packages-common/tornado/platform/caresresolver.py @@ -2,6 +2,7 @@ from __future__ import absolute_import, division, print_function import pycares # type: ignore import socket +from tornado.concurrent import Future from tornado import gen from tornado.ioloop import IOLoop from tornado.netutil import Resolver, is_valid_ip @@ -19,11 +20,11 @@ class CaresResolver(Resolver): the default for ``tornado.simple_httpclient``, but other libraries may default to ``AF_UNSPEC``. - .. versionchanged:: 4.1 - The ``io_loop`` argument is deprecated. + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. """ - def initialize(self, io_loop=None): - self.io_loop = io_loop or IOLoop.current() + def initialize(self): + self.io_loop = IOLoop.current() self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb) self.fds = {} @@ -55,11 +56,10 @@ class CaresResolver(Resolver): addresses = [host] else: # gethostbyname doesn't take callback as a kwarg - self.channel.gethostbyname(host, family, (yield gen.Callback(1))) - callback_args = yield gen.Wait(1) - assert isinstance(callback_args, gen.Arguments) - assert not callback_args.kwargs - result, error = callback_args.args + fut = Future() + self.channel.gethostbyname(host, family, + lambda result, error: fut.set_result((result, error))) + result, error = yield fut if error: raise IOError('C-Ares returned error %s: %s while resolving %s' % (error, pycares.errno.strerror(error), host)) diff --git a/server/www/packages/packages-common/tornado/platform/common.py b/server/www/packages/packages-common/tornado/platform/common.py index a73f8db..b597748 100644 --- a/server/www/packages/packages-common/tornado/platform/common.py +++ b/server/www/packages/packages-common/tornado/platform/common.py @@ -32,10 +32,12 @@ class Waker(interface.Waker): and Jython. """ def __init__(self): + from .auto import set_close_exec # Based on Zope select_trigger.py: # https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py self.writer = socket.socket() + set_close_exec(self.writer.fileno()) # Disable buffering -- pulling the trigger sends 1 byte, # and we want that sent immediately, to wake up ASAP. self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) @@ -54,6 +56,7 @@ class Waker(interface.Waker): # http://mail.zope.org/pipermail/zope/2005-July/160433.html # for hideous details. 
a = socket.socket() + set_close_exec(a.fileno()) a.bind(("127.0.0.1", 0)) a.listen(1) connect_address = a.getsockname() # assigned (host, port) pair @@ -78,6 +81,7 @@ class Waker(interface.Waker): a.close() self.reader, addr = a.accept() + set_close_exec(self.reader.fileno()) self.reader.setblocking(0) self.writer.setblocking(0) a.close() diff --git a/server/www/packages/packages-common/tornado/platform/epoll.py b/server/www/packages/packages-common/tornado/platform/epoll.py index 80bfd8a..4e34617 100644 --- a/server/www/packages/packages-common/tornado/platform/epoll.py +++ b/server/www/packages/packages-common/tornado/platform/epoll.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2012 Facebook # diff --git a/server/www/packages/packages-common/tornado/platform/interface.py b/server/www/packages/packages-common/tornado/platform/interface.py index c0ef290..cac5326 100644 --- a/server/www/packages/packages-common/tornado/platform/interface.py +++ b/server/www/packages/packages-common/tornado/platform/interface.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2011 Facebook # diff --git a/server/www/packages/packages-common/tornado/platform/kqueue.py b/server/www/packages/packages-common/tornado/platform/kqueue.py index 3a5d417..4e0aee0 100644 --- a/server/www/packages/packages-common/tornado/platform/kqueue.py +++ b/server/www/packages/packages-common/tornado/platform/kqueue.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2012 Facebook # diff --git a/server/www/packages/packages-common/tornado/platform/posix.py b/server/www/packages/packages-common/tornado/platform/posix.py index 9bf1f18..6fe1fa8 100644 --- a/server/www/packages/packages-common/tornado/platform/posix.py +++ b/server/www/packages/packages-common/tornado/platform/posix.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2011 Facebook # diff --git a/server/www/packages/packages-common/tornado/platform/select.py b/server/www/packages/packages-common/tornado/platform/select.py index a18049f..14e8a47 100644 --- a/server/www/packages/packages-common/tornado/platform/select.py +++ b/server/www/packages/packages-common/tornado/platform/select.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2012 Facebook # diff --git a/server/www/packages/packages-common/tornado/platform/twisted.py b/server/www/packages/packages-common/tornado/platform/twisted.py index 0f9787e..b38a755 100644 --- a/server/www/packages/packages-common/tornado/platform/twisted.py +++ b/server/www/packages/packages-common/tornado/platform/twisted.py @@ -32,7 +32,7 @@ import sys import twisted.internet.abstract # type: ignore from twisted.internet.defer import Deferred # type: ignore from twisted.internet.posixbase import PosixReactorBase # type: ignore -from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore +from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore # noqa: E501 from twisted.python import failure, log # type: ignore from twisted.internet import error # type: ignore import twisted.names.cache # type: ignore @@ -42,7 +42,7 @@ import twisted.names.resolve # type: ignore from zope.interface import implementer # type: ignore -from tornado.concurrent import Future +from tornado.concurrent import Future, future_set_exc_info from tornado.escape import utf8 from tornado import gen import tornado.ioloop @@ -112,7 +112,7 @@ class TornadoReactor(PosixReactorBase): instead of 
``reactor.run()``.

    It is also possible to create a non-global reactor by calling
-    ``tornado.platform.twisted.TornadoReactor(io_loop)``. However, if
+    ``tornado.platform.twisted.TornadoReactor()``. However, if
     the `.IOLoop` and reactor are to be short-lived (such as those used
     in unit tests), additional cleanup may be required. Specifically, it
     is recommended to call::
@@ -122,13 +122,18 @@ class TornadoReactor(PosixReactorBase):

     before closing the `.IOLoop`.

-    .. versionchanged:: 4.1
-       The ``io_loop`` argument is deprecated.
+    .. versionchanged:: 5.0
+       The ``io_loop`` argument (deprecated since version 4.1) has been removed.
+
+    .. deprecated:: 5.1
+
+        This class will be removed in Tornado 6.0. Use
+        ``twisted.internet.asyncioreactor.AsyncioSelectorReactor``
+        instead.
+
     """
-    def __init__(self, io_loop=None):
-        if not io_loop:
-            io_loop = tornado.ioloop.IOLoop.current()
-        self._io_loop = io_loop
+    def __init__(self):
+        self._io_loop = tornado.ioloop.IOLoop.current()
         self._readers = {}  # map of reader objects to fd
         self._writers = {}  # map of writer objects to fd
         self._fds = {}  # a map of fd to a (reader, writer) tuple
@@ -319,7 +324,10 @@ class _TestReactor(TornadoReactor):
     """
     def __init__(self):
         # always use a new ioloop
-        super(_TestReactor, self).__init__(IOLoop())
+        IOLoop.clear_current()
+        IOLoop(make_current=True)
+        super(_TestReactor, self).__init__()
+        IOLoop.clear_current()

     def listenTCP(self, port, factory, backlog=50, interface=''):
         # default to localhost to avoid firewall prompts on the mac
@@ -335,7 +343,7 @@ class _TestReactor(TornadoReactor):
             port, protocol, interface=interface, maxPacketSize=maxPacketSize)


-def install(io_loop=None):
+def install():
     """Install this package as the default Twisted reactor.

     ``install()`` must be called very early in the startup process,
@@ -346,13 +354,15 @@ def install(io_loop=None):
     in multi-process mode, and an external process manager such as
     ``supervisord`` is recommended instead.

-    .. versionchanged:: 4.1
-       The ``io_loop`` argument is deprecated.
+    .. versionchanged:: 5.0
+       The ``io_loop`` argument (deprecated since version 4.1) has been removed.

+    .. deprecated:: 5.1
+
+        This function will be removed in Tornado 6.0. Use
+        ``twisted.internet.asyncioreactor.install`` instead.
     """
-    if not io_loop:
-        io_loop = tornado.ioloop.IOLoop.current()
-    reactor = TornadoReactor(io_loop)
+    reactor = TornadoReactor()
     from twisted.internet.main import installReactor  # type: ignore
     installReactor(reactor)
     return reactor
@@ -384,6 +394,8 @@ class _FD(object):
         self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
         self.lost = True

+    writeConnectionLost = readConnectionLost = connectionLost
+
     def logPrefix(self):
         return ''
@@ -410,6 +422,11 @@ class TwistedIOLoop(tornado.ioloop.IOLoop):

     See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
     installing alternative IOLoops.
+
+    .. deprecated:: 5.1
+
+        The `asyncio` event loop will be the only available implementation in
+        Tornado 6.0.
     """
     def initialize(self, reactor=None, **kwargs):
         super(TwistedIOLoop, self).initialize(**kwargs)
@@ -519,21 +536,20 @@ class TwistedResolver(Resolver):
     recommended only when threads cannot be used, since it has
     limitations compared to the standard ``getaddrinfo``-based
     `~tornado.netutil.Resolver` and
-    `~tornado.netutil.ThreadedResolver`. Specifically, it returns at
+    `~tornado.netutil.DefaultExecutorResolver`. Specifically, it returns at
It may fail to resolve when ``family`` is not ``socket.AF_UNSPEC``. Requires Twisted 12.1 or newer. - .. versionchanged:: 4.1 - The ``io_loop`` argument is deprecated. + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. """ - def initialize(self, io_loop=None): - self.io_loop = io_loop or IOLoop.current() + def initialize(self): # partial copy of twisted.names.client.createResolver, which doesn't # allow for a reactor to be passed in. - self.reactor = tornado.platform.twisted.TornadoReactor(io_loop) + self.reactor = tornado.platform.twisted.TornadoReactor() host_resolver = twisted.names.hosts.Resolver('/etc/hosts') cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor) @@ -554,7 +570,9 @@ class TwistedResolver(Resolver): resolved_family = socket.AF_INET6 else: deferred = self.resolver.getHostByName(utf8(host)) - resolved = yield gen.Task(deferred.addBoth) + fut = Future() + deferred.addBoth(fut.set_result) + resolved = yield fut if isinstance(resolved, failure.Failure): try: resolved.raiseException() @@ -586,6 +604,6 @@ if hasattr(gen.convert_yielded, 'register'): # Should never happen, but just in case raise Exception("errback called without error") except: - f.set_exc_info(sys.exc_info()) + future_set_exc_info(f, sys.exc_info()) d.addCallbacks(f.set_result, errback) return f diff --git a/server/www/packages/packages-common/tornado/platform/windows.py b/server/www/packages/packages-common/tornado/platform/windows.py index e94a0cf..4127700 100644 --- a/server/www/packages/packages-common/tornado/platform/windows.py +++ b/server/www/packages/packages-common/tornado/platform/windows.py @@ -8,7 +8,7 @@ import ctypes.wintypes # type: ignore # See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation -SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD) +SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD) # noqa: E501 SetHandleInformation.restype = ctypes.wintypes.BOOL HANDLE_FLAG_INHERIT = 0x00000001 diff --git a/server/www/packages/packages-common/tornado/process.py b/server/www/packages/packages-common/tornado/process.py index fae94f3..122fd7e 100644 --- a/server/www/packages/packages-common/tornado/process.py +++ b/server/www/packages/packages-common/tornado/process.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2011 Facebook # @@ -29,7 +28,7 @@ import time from binascii import hexlify -from tornado.concurrent import Future +from tornado.concurrent import Future, future_set_result_unless_cancelled from tornado import ioloop from tornado.iostream import PipeIOStream from tornado.log import gen_log @@ -126,10 +125,6 @@ def fork_processes(num_processes, max_restarts=100): assert _task_id is None if num_processes is None or num_processes <= 0: num_processes = cpu_count() - if ioloop.IOLoop.initialized(): - raise RuntimeError("Cannot run in multiple processes: IOLoop instance " - "has already been initialized. You cannot call " - "IOLoop.instance() before calling start_processes()") gen_log.info("Starting %d processes", num_processes) children = {} @@ -199,16 +194,17 @@ class Subprocess(object): * ``stdin``, ``stdout``, and ``stderr`` may have the value ``tornado.process.Subprocess.STREAM``, which will make the corresponding - attribute of the resulting Subprocess a `.PipeIOStream`. 
- * A new keyword argument ``io_loop`` may be used to pass in an IOLoop. + attribute of the resulting Subprocess a `.PipeIOStream`. If this option + is used, the caller is responsible for closing the streams when done + with them. The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and ``wait_for_exit`` methods do not work on Windows. There is therefore no reason to use this class instead of ``subprocess.Popen`` on that platform. - .. versionchanged:: 4.1 - The ``io_loop`` argument is deprecated. + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. """ STREAM = object() @@ -217,7 +213,7 @@ class Subprocess(object): _waiting = {} # type: ignore def __init__(self, *args, **kwargs): - self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current() + self.io_loop = ioloop.IOLoop.current() # All FDs we create should be closed on error; those in to_close # should be closed in the parent process on success. pipe_fds = [] @@ -227,19 +223,19 @@ class Subprocess(object): kwargs['stdin'] = in_r pipe_fds.extend((in_r, in_w)) to_close.append(in_r) - self.stdin = PipeIOStream(in_w, io_loop=self.io_loop) + self.stdin = PipeIOStream(in_w) if kwargs.get('stdout') is Subprocess.STREAM: out_r, out_w = _pipe_cloexec() kwargs['stdout'] = out_w pipe_fds.extend((out_r, out_w)) to_close.append(out_w) - self.stdout = PipeIOStream(out_r, io_loop=self.io_loop) + self.stdout = PipeIOStream(out_r) if kwargs.get('stderr') is Subprocess.STREAM: err_r, err_w = _pipe_cloexec() kwargs['stderr'] = err_w pipe_fds.extend((err_r, err_w)) to_close.append(err_w) - self.stderr = PipeIOStream(err_r, io_loop=self.io_loop) + self.stderr = PipeIOStream(err_r) try: self.proc = subprocess.Popen(*args, **kwargs) except: @@ -270,7 +266,7 @@ class Subprocess(object): signal handler is causing a problem. """ self._exit_callback = stack_context.wrap(callback) - Subprocess.initialize(self.io_loop) + Subprocess.initialize() Subprocess._waiting[self.pid] = self Subprocess._try_cleanup_process(self.pid) @@ -297,12 +293,12 @@ class Subprocess(object): # Unfortunately we don't have the original args any more. future.set_exception(CalledProcessError(ret, None)) else: - future.set_result(ret) + future_set_result_unless_cancelled(future, ret) self.set_exit_callback(callback) return future @classmethod - def initialize(cls, io_loop=None): + def initialize(cls): """Initializes the ``SIGCHLD`` handler. The signal handler is run on an `.IOLoop` to avoid locking issues. @@ -310,13 +306,13 @@ class Subprocess(object): same one used by individual Subprocess objects (as long as the ``IOLoops`` are each running in separate threads). - .. versionchanged:: 4.1 - The ``io_loop`` argument is deprecated. + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been + removed. """ if cls._initialized: return - if io_loop is None: - io_loop = ioloop.IOLoop.current() + io_loop = ioloop.IOLoop.current() cls._old_sigchld = signal.signal( signal.SIGCHLD, lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup)) diff --git a/server/www/packages/packages-common/tornado/queues.py b/server/www/packages/packages-common/tornado/queues.py index 0041a80..7cb96bf 100644 --- a/server/www/packages/packages-common/tornado/queues.py +++ b/server/www/packages/packages-common/tornado/queues.py @@ -12,7 +12,9 @@ # License for the specific language governing permissions and limitations # under the License. -"""Asynchronous queues for coroutines. 
+"""Asynchronous queues for coroutines. These classes are very similar +to those provided in the standard library's `asyncio package +`_. .. warning:: @@ -20,6 +22,7 @@ are *not* thread-safe. To use these queues from another thread, use `.IOLoop.add_callback` to transfer control to the `.IOLoop` thread before calling any queue methods. + """ from __future__ import absolute_import, division, print_function @@ -28,7 +31,7 @@ import collections import heapq from tornado import gen, ioloop -from tornado.concurrent import Future +from tornado.concurrent import Future, future_set_result_unless_cancelled from tornado.locks import Event __all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty'] @@ -47,7 +50,8 @@ class QueueFull(Exception): def _set_timeout(future, timeout): if timeout: def on_timeout(): - future.set_exception(gen.TimeoutError()) + if not future.done(): + future.set_exception(gen.TimeoutError()) io_loop = ioloop.IOLoop.current() timeout_handle = io_loop.add_timeout(timeout, on_timeout) future.add_done_callback( @@ -75,28 +79,24 @@ class Queue(object): q = Queue(maxsize=2) - @gen.coroutine - def consumer(): - while True: - item = yield q.get() + async def consumer(): + async for item in q: try: print('Doing work on %s' % item) - yield gen.sleep(0.01) + await gen.sleep(0.01) finally: q.task_done() - @gen.coroutine - def producer(): + async def producer(): for item in range(5): - yield q.put(item) + await q.put(item) print('Put %s' % item) - @gen.coroutine - def main(): + async def main(): # Start consumer without waiting (since it never finishes). IOLoop.current().spawn_callback(consumer) - yield producer() # Wait for producer to put all tasks. - yield q.join() # Wait for consumer to finish all tasks. + await producer() # Wait for producer to put all tasks. + await q.join() # Wait for consumer to finish all tasks. print('Done') IOLoop.current().run_sync(main) @@ -115,11 +115,14 @@ class Queue(object): Doing work on 4 Done - In Python 3.5, `Queue` implements the async iterator protocol, so - ``consumer()`` could be rewritten as:: - async def consumer(): - async for item in q: + In versions of Python without native coroutines (before 3.5), + ``consumer()`` could be written as:: + + @gen.coroutine + def consumer(): + while True: + item = yield q.get() try: print('Doing work on %s' % item) yield gen.sleep(0.01) @@ -166,18 +169,23 @@ class Queue(object): def put(self, item, timeout=None): """Put an item into the queue, perhaps waiting until there is room. - Returns a Future, which raises `tornado.gen.TimeoutError` after a + Returns a Future, which raises `tornado.util.TimeoutError` after a timeout. + + ``timeout`` may be a number denoting a time (on the same + scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a + `datetime.timedelta` object for a deadline relative to the + current time. """ + future = Future() try: self.put_nowait(item) except QueueFull: - future = Future() self._putters.append((item, future)) _set_timeout(future, timeout) - return future else: - return gen._null_future + future.set_result(None) + return future def put_nowait(self, item): """Put an item into the queue without blocking. @@ -189,7 +197,7 @@ class Queue(object): assert self.empty(), "queue non-empty, why are getters waiting?" 
getter = self._getters.popleft() self.__put_internal(item) - getter.set_result(self._get()) + future_set_result_unless_cancelled(getter, self._get()) elif self.full(): raise QueueFull else: @@ -199,7 +207,12 @@ class Queue(object): """Remove and return an item from the queue. Returns a Future which resolves once an item is available, or raises - `tornado.gen.TimeoutError` after a timeout. + `tornado.util.TimeoutError` after a timeout. + + ``timeout`` may be a number denoting a time (on the same + scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a + `datetime.timedelta` object for a deadline relative to the + current time. """ future = Future() try: @@ -220,7 +233,7 @@ class Queue(object): assert self.full(), "queue not full, why are putters waiting?" item, putter = self._putters.popleft() self.__put_internal(item) - putter.set_result(None) + future_set_result_unless_cancelled(putter, None) return self._get() elif self.qsize(): return self._get() @@ -248,12 +261,11 @@ class Queue(object): def join(self, timeout=None): """Block until all items in the queue are processed. - Returns a Future, which raises `tornado.gen.TimeoutError` after a + Returns a Future, which raises `tornado.util.TimeoutError` after a timeout. """ return self._finished.wait(timeout) - @gen.coroutine def __aiter__(self): return _QueueIterator(self) diff --git a/server/www/packages/packages-common/tornado/routing.py b/server/www/packages/packages-common/tornado/routing.py index 6762dc0..e56d1a7 100644 --- a/server/www/packages/packages-common/tornado/routing.py +++ b/server/www/packages/packages-common/tornado/routing.py @@ -242,6 +242,11 @@ class _RoutingDelegate(httputil.HTTPMessageDelegate): start_line=start_line, headers=headers) self.delegate = self.router.find_handler(request) + if self.delegate is None: + app_log.debug("Delegate for %s %s request not found", + start_line.method, start_line.path) + self.delegate = _DefaultMessageDelegate(self.request_conn) + return self.delegate.headers_received(start_line, headers) def data_received(self, chunk): @@ -254,6 +259,16 @@ class _RoutingDelegate(httputil.HTTPMessageDelegate): self.delegate.on_connection_close() +class _DefaultMessageDelegate(httputil.HTTPMessageDelegate): + def __init__(self, connection): + self.connection = connection + + def finish(self): + self.connection.write_headers( + httputil.ResponseStartLine("HTTP/1.1", 404, "Not Found"), httputil.HTTPHeaders()) + self.connection.finish() + + class RuleRouter(Router): """Rule-based router implementation.""" @@ -278,7 +293,8 @@ class RuleRouter(Router): ]) In the examples above, ``Target`` can be a nested `Router` instance, an instance of - `~.httputil.HTTPServerConnectionDelegate` or an old-style callable, accepting a request argument. + `~.httputil.HTTPServerConnectionDelegate` or an old-style callable, + accepting a request argument. :arg rules: a list of `Rule` instances or tuples of `Rule` constructor arguments. @@ -567,7 +583,7 @@ class PathMatches(Matcher): else: try: unescaped_fragment = re_unescape(fragment) - except ValueError as exc: + except ValueError: # If we can't unescape part of it, we can't # reverse this url. return (None, None) @@ -589,7 +605,7 @@ class URLSpec(Rule): * ``pattern``: Regular expression to be matched. Any capturing groups in the regex will be passed in to the handler's get/post/etc methods as arguments (by keyword if named, by - position if unnamed. Named and unnamed capturing groups may + position if unnamed. 
Named and unnamed capturing groups may not be mixed in the same rule). * ``handler``: `~.web.RequestHandler` subclass to be invoked. diff --git a/server/www/packages/packages-common/tornado/simple_httpclient.py b/server/www/packages/packages-common/tornado/simple_httpclient.py index 8fb7070..60b7956 100644 --- a/server/www/packages/packages-common/tornado/simple_httpclient.py +++ b/server/www/packages/packages-common/tornado/simple_httpclient.py @@ -1,11 +1,11 @@ -#!/usr/bin/env python from __future__ import absolute_import, division, print_function -from tornado.escape import utf8, _unicode +from tornado.escape import _unicode from tornado import gen from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy from tornado import httputil from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters +from tornado.ioloop import IOLoop from tornado.iostream import StreamClosedError from tornado.netutil import Resolver, OverrideResolver, _client_ssl_defaults from tornado.log import gen_log @@ -20,6 +20,7 @@ import functools import re import socket import sys +import time from io import BytesIO @@ -34,17 +35,38 @@ except ImportError: # ssl is not available on Google App Engine. ssl = None -try: - import certifi -except ImportError: - certifi = None + +class HTTPTimeoutError(HTTPError): + """Error raised by SimpleAsyncHTTPClient on timeout. + + For historical reasons, this is a subclass of `.HTTPClientError` + which simulates a response code of 599. + + .. versionadded:: 5.1 + """ + def __init__(self, message): + super(HTTPTimeoutError, self).__init__(599, message=message) + + def __str__(self): + return self.message -def _default_ca_certs(): - if certifi is None: - raise Exception("The 'certifi' package is required to use https " - "in simple_httpclient") - return certifi.where() +class HTTPStreamClosedError(HTTPError): + """Error raised by SimpleAsyncHTTPClient when the underlying stream is closed. + + When a more specific exception is available (such as `ConnectionResetError`), + it may be raised instead of this one. + + For historical reasons, this is a subclass of `.HTTPClientError` + which simulates a response code of 599. + + .. versionadded:: 5.1 + """ + def __init__(self, message): + super(HTTPStreamClosedError, self).__init__(599, message=message) + + def __str__(self): + return self.message class SimpleAsyncHTTPClient(AsyncHTTPClient): @@ -56,7 +78,7 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient): are not reused, and callers cannot select the network interface to be used. """ - def initialize(self, io_loop, max_clients=10, + def initialize(self, max_clients=10, hostname_mapping=None, max_buffer_size=104857600, resolver=None, defaults=None, max_header_size=None, max_body_size=None): @@ -92,8 +114,7 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient): .. versionchanged:: 4.2 Added the ``max_body_size`` argument. 
""" - super(SimpleAsyncHTTPClient, self).initialize(io_loop, - defaults=defaults) + super(SimpleAsyncHTTPClient, self).initialize(defaults=defaults) self.max_clients = max_clients self.queue = collections.deque() self.active = {} @@ -107,12 +128,12 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient): self.resolver = resolver self.own_resolver = False else: - self.resolver = Resolver(io_loop=io_loop) + self.resolver = Resolver() self.own_resolver = True if hostname_mapping is not None: self.resolver = OverrideResolver(resolver=self.resolver, mapping=hostname_mapping) - self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop) + self.tcp_client = TCPClient(resolver=self.resolver) def close(self): super(SimpleAsyncHTTPClient, self).close() @@ -153,7 +174,7 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient): def _handle_request(self, request, release_callback, final_callback): self._connection_class()( - self.io_loop, self, request, release_callback, + self, request, release_callback, final_callback, self.max_buffer_size, self.tcp_client, self.max_header_size, self.max_body_size) @@ -181,7 +202,7 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient): error_message = "Timeout {0}".format(info) if info else "Timeout" timeout_response = HTTPResponse( - request, 599, error=HTTPError(599, error_message), + request, 599, error=HTTPTimeoutError(error_message), request_time=self.io_loop.time() - request.start_time) self.io_loop.add_callback(callback, timeout_response) del self.waiting[key] @@ -190,11 +211,12 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient): class _HTTPConnection(httputil.HTTPMessageDelegate): _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"]) - def __init__(self, io_loop, client, request, release_callback, + def __init__(self, client, request, release_callback, final_callback, max_buffer_size, tcp_client, max_header_size, max_body_size): - self.start_time = io_loop.time() - self.io_loop = io_loop + self.io_loop = IOLoop.current() + self.start_time = self.io_loop.time() + self.start_wall_time = time.time() self.client = client self.request = request self.release_callback = release_callback @@ -210,7 +232,11 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): # Timeout handle returned by IOLoop.add_timeout self._timeout = None self._sockaddr = None - with stack_context.ExceptionStackContext(self._handle_exception): + IOLoop.current().add_callback(self.run) + + @gen.coroutine + def run(self): + try: self.parsed = urlparse.urlsplit(_unicode(self.request.url)) if self.parsed.scheme not in ("http", "https"): raise ValueError("Unsupported url scheme: %s" % @@ -228,7 +254,7 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): host = host[1:-1] self.parsed_hostname = host # save final host for _on_connect - if request.allow_ipv6 is False: + if self.request.allow_ipv6 is False: af = socket.AF_INET else: af = socket.AF_UNSPEC @@ -240,10 +266,93 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): self._timeout = self.io_loop.add_timeout( self.start_time + timeout, stack_context.wrap(functools.partial(self._on_timeout, "while connecting"))) - self.tcp_client.connect(host, port, af=af, - ssl_options=ssl_options, - max_buffer_size=self.max_buffer_size, - callback=self._on_connect) + stream = yield self.tcp_client.connect( + host, port, af=af, + ssl_options=ssl_options, + max_buffer_size=self.max_buffer_size) + + if self.final_callback is None: + # final_callback is cleared if we've hit our timeout. 
+ stream.close() + return + self.stream = stream + self.stream.set_close_callback(self.on_connection_close) + self._remove_timeout() + if self.final_callback is None: + return + if self.request.request_timeout: + self._timeout = self.io_loop.add_timeout( + self.start_time + self.request.request_timeout, + stack_context.wrap(functools.partial(self._on_timeout, "during request"))) + if (self.request.method not in self._SUPPORTED_METHODS and + not self.request.allow_nonstandard_methods): + raise KeyError("unknown method %s" % self.request.method) + for key in ('network_interface', + 'proxy_host', 'proxy_port', + 'proxy_username', 'proxy_password', + 'proxy_auth_mode'): + if getattr(self.request, key, None): + raise NotImplementedError('%s not supported' % key) + if "Connection" not in self.request.headers: + self.request.headers["Connection"] = "close" + if "Host" not in self.request.headers: + if '@' in self.parsed.netloc: + self.request.headers["Host"] = self.parsed.netloc.rpartition('@')[-1] + else: + self.request.headers["Host"] = self.parsed.netloc + username, password = None, None + if self.parsed.username is not None: + username, password = self.parsed.username, self.parsed.password + elif self.request.auth_username is not None: + username = self.request.auth_username + password = self.request.auth_password or '' + if username is not None: + if self.request.auth_mode not in (None, "basic"): + raise ValueError("unsupported auth_mode %s", + self.request.auth_mode) + self.request.headers["Authorization"] = ( + b"Basic " + base64.b64encode( + httputil.encode_username_password(username, password))) + if self.request.user_agent: + self.request.headers["User-Agent"] = self.request.user_agent + if not self.request.allow_nonstandard_methods: + # Some HTTP methods nearly always have bodies while others + # almost never do. Fail in this case unless the user has + # opted out of sanity checks with allow_nonstandard_methods. + body_expected = self.request.method in ("POST", "PATCH", "PUT") + body_present = (self.request.body is not None or + self.request.body_producer is not None) + if ((body_expected and not body_present) or + (body_present and not body_expected)): + raise ValueError( + 'Body must %sbe None for method %s (unless ' + 'allow_nonstandard_methods is true)' % + ('not ' if body_expected else '', self.request.method)) + if self.request.expect_100_continue: + self.request.headers["Expect"] = "100-continue" + if self.request.body is not None: + # When body_producer is used the caller is responsible for + # setting Content-Length (or else chunked encoding will be used). + self.request.headers["Content-Length"] = str(len( + self.request.body)) + if (self.request.method == "POST" and + "Content-Type" not in self.request.headers): + self.request.headers["Content-Type"] = "application/x-www-form-urlencoded" + if self.request.decompress_response: + self.request.headers["Accept-Encoding"] = "gzip" + req_path = ((self.parsed.path or '/') + + (('?' 
+ self.parsed.query) if self.parsed.query else '')) + self.connection = self._create_connection(stream) + start_line = httputil.RequestStartLine(self.request.method, + req_path, '') + self.connection.write_headers(start_line, self.request.headers) + if self.request.expect_100_continue: + yield self.connection.read_response(self) + else: + yield self._write_body(True) + except Exception: + if not self._handle_exception(*sys.exc_info()): + raise def _get_ssl_options(self, scheme): if scheme == "https": @@ -256,142 +365,39 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): self.request.client_cert is None and self.request.client_key is None): return _client_ssl_defaults - ssl_options = {} - if self.request.validate_cert: - ssl_options["cert_reqs"] = ssl.CERT_REQUIRED - if self.request.ca_certs is not None: - ssl_options["ca_certs"] = self.request.ca_certs - elif not hasattr(ssl, 'create_default_context'): - # When create_default_context is present, - # we can omit the "ca_certs" parameter entirely, - # which avoids the dependency on "certifi" for py34. - ssl_options["ca_certs"] = _default_ca_certs() - if self.request.client_key is not None: - ssl_options["keyfile"] = self.request.client_key + ssl_ctx = ssl.create_default_context( + ssl.Purpose.SERVER_AUTH, + cafile=self.request.ca_certs) + if not self.request.validate_cert: + ssl_ctx.check_hostname = False + ssl_ctx.verify_mode = ssl.CERT_NONE if self.request.client_cert is not None: - ssl_options["certfile"] = self.request.client_cert - - # SSL interoperability is tricky. We want to disable - # SSLv2 for security reasons; it wasn't disabled by default - # until openssl 1.0. The best way to do this is to use - # the SSL_OP_NO_SSLv2, but that wasn't exposed to python - # until 3.2. Python 2.7 adds the ciphers argument, which - # can also be used to disable SSLv2. As a last resort - # on python 2.6, we set ssl_version to TLSv1. This is - # more narrow than we'd like since it also breaks - # compatibility with servers configured for SSLv3 only, - # but nearly all servers support both SSLv3 and TLSv1: - # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html - if sys.version_info >= (2, 7): - # In addition to disabling SSLv2, we also exclude certain - # classes of insecure ciphers. - ssl_options["ciphers"] = "DEFAULT:!SSLv2:!EXPORT:!DES" - else: - # This is really only necessary for pre-1.0 versions - # of openssl, but python 2.6 doesn't expose version - # information. - ssl_options["ssl_version"] = ssl.PROTOCOL_TLSv1 - return ssl_options + ssl_ctx.load_cert_chain(self.request.client_cert, + self.request.client_key) + if hasattr(ssl, 'OP_NO_COMPRESSION'): + # See netutil.ssl_options_to_context + ssl_ctx.options |= ssl.OP_NO_COMPRESSION + return ssl_ctx return None def _on_timeout(self, info=None): """Timeout callback of _HTTPConnection instance. - Raise a timeout HTTPError when a timeout occurs. + Raise a `HTTPTimeoutError` when a timeout occurs. :info string key: More detailed timeout information. """ self._timeout = None error_message = "Timeout {0}".format(info) if info else "Timeout" if self.final_callback is not None: - raise HTTPError(599, error_message) + self._handle_exception(HTTPTimeoutError, HTTPTimeoutError(error_message), + None) def _remove_timeout(self): if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = None - def _on_connect(self, stream): - if self.final_callback is None: - # final_callback is cleared if we've hit our timeout. 
- stream.close() - return - self.stream = stream - self.stream.set_close_callback(self.on_connection_close) - self._remove_timeout() - if self.final_callback is None: - return - if self.request.request_timeout: - self._timeout = self.io_loop.add_timeout( - self.start_time + self.request.request_timeout, - stack_context.wrap(functools.partial(self._on_timeout, "during request"))) - if (self.request.method not in self._SUPPORTED_METHODS and - not self.request.allow_nonstandard_methods): - raise KeyError("unknown method %s" % self.request.method) - for key in ('network_interface', - 'proxy_host', 'proxy_port', - 'proxy_username', 'proxy_password', - 'proxy_auth_mode'): - if getattr(self.request, key, None): - raise NotImplementedError('%s not supported' % key) - if "Connection" not in self.request.headers: - self.request.headers["Connection"] = "close" - if "Host" not in self.request.headers: - if '@' in self.parsed.netloc: - self.request.headers["Host"] = self.parsed.netloc.rpartition('@')[-1] - else: - self.request.headers["Host"] = self.parsed.netloc - username, password = None, None - if self.parsed.username is not None: - username, password = self.parsed.username, self.parsed.password - elif self.request.auth_username is not None: - username = self.request.auth_username - password = self.request.auth_password or '' - if username is not None: - if self.request.auth_mode not in (None, "basic"): - raise ValueError("unsupported auth_mode %s", - self.request.auth_mode) - auth = utf8(username) + b":" + utf8(password) - self.request.headers["Authorization"] = (b"Basic " + - base64.b64encode(auth)) - if self.request.user_agent: - self.request.headers["User-Agent"] = self.request.user_agent - if not self.request.allow_nonstandard_methods: - # Some HTTP methods nearly always have bodies while others - # almost never do. Fail in this case unless the user has - # opted out of sanity checks with allow_nonstandard_methods. - body_expected = self.request.method in ("POST", "PATCH", "PUT") - body_present = (self.request.body is not None or - self.request.body_producer is not None) - if ((body_expected and not body_present) or - (body_present and not body_expected)): - raise ValueError( - 'Body must %sbe None for method %s (unless ' - 'allow_nonstandard_methods is true)' % - ('not ' if body_expected else '', self.request.method)) - if self.request.expect_100_continue: - self.request.headers["Expect"] = "100-continue" - if self.request.body is not None: - # When body_producer is used the caller is responsible for - # setting Content-Length (or else chunked encoding will be used). - self.request.headers["Content-Length"] = str(len( - self.request.body)) - if (self.request.method == "POST" and - "Content-Type" not in self.request.headers): - self.request.headers["Content-Type"] = "application/x-www-form-urlencoded" - if self.request.decompress_response: - self.request.headers["Accept-Encoding"] = "gzip" - req_path = ((self.parsed.path or '/') + - (('?' 
+ self.parsed.query) if self.parsed.query else '')) - self.connection = self._create_connection(stream) - start_line = httputil.RequestStartLine(self.request.method, - req_path, '') - self.connection.write_headers(start_line, self.request.headers) - if self.request.expect_100_continue: - self._read_response() - else: - self._write_body(True) - def _create_connection(self, stream): stream.set_nodelay(True) connection = HTTP1Connection( @@ -404,31 +410,21 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): self._sockaddr) return connection + @gen.coroutine def _write_body(self, start_read): if self.request.body is not None: self.connection.write(self.request.body) elif self.request.body_producer is not None: fut = self.request.body_producer(self.connection.write) if fut is not None: - fut = gen.convert_yielded(fut) - - def on_body_written(fut): - fut.result() - self.connection.finish() - if start_read: - self._read_response() - self.io_loop.add_future(fut, on_body_written) - return + yield fut self.connection.finish() if start_read: - self._read_response() - - def _read_response(self): - # Ensure that any exception raised in read_response ends up in our - # stack context. - self.io_loop.add_future( - self.connection.read_response(self), - lambda f: f.result()) + try: + yield self.connection.read_response(self) + except StreamClosedError: + if not self._handle_exception(*sys.exc_info()): + raise def _release(self): if self.release_callback is not None: @@ -448,11 +444,12 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): self._remove_timeout() if isinstance(value, StreamClosedError): if value.real_error is None: - value = HTTPError(599, "Stream closed") + value = HTTPStreamClosedError("Stream closed") else: value = value.real_error self._run_callback(HTTPResponse(self.request, 599, error=value, request_time=self.io_loop.time() - self.start_time, + start_time=self.start_wall_time, )) if hasattr(self, "stream"): @@ -474,8 +471,8 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): if self.stream.error: raise self.stream.error try: - raise HTTPError(599, message) - except HTTPError: + raise HTTPStreamClosedError(message) + except HTTPStreamClosedError: self._handle_exception(*sys.exc_info()) def headers_received(self, first_line, headers): @@ -533,7 +530,8 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): final_callback = self.final_callback self.final_callback = None self._release() - self.client.fetch(new_request, final_callback) + fut = self.client.fetch(new_request, raise_error=False) + fut.add_done_callback(lambda f: final_callback(f.result())) self._on_end_request() return if self.request.streaming_callback: @@ -544,6 +542,7 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): self.code, reason=getattr(self, 'reason', None), headers=self.headers, request_time=self.io_loop.time() - self.start_time, + start_time=self.start_wall_time, buffer=buffer, effective_url=self.request.url) self._run_callback(response) diff --git a/server/www/packages/packages-common/tornado/speedups.cp37-win32.pyd b/server/www/packages/packages-common/tornado/speedups.cp37-win32.pyd new file mode 100644 index 0000000..71a5eab Binary files /dev/null and b/server/www/packages/packages-common/tornado/speedups.cp37-win32.pyd differ diff --git a/server/www/packages/packages-common/tornado/stack_context.py b/server/www/packages/packages-common/tornado/stack_context.py index 61ae51f..a1eca4c 100644 --- a/server/www/packages/packages-common/tornado/stack_context.py +++ 
b/server/www/packages/packages-common/tornado/stack_context.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2010 Facebook # @@ -65,12 +64,18 @@ Here are a few rules of thumb for when it's necessary: persist across asynchronous calls, create a new `StackContext` (or `ExceptionStackContext`), and make your asynchronous calls in a ``with`` block that references your `StackContext`. + +.. deprecated:: 5.1 + + The ``stack_context`` package is deprecated and will be removed in + Tornado 6.0. """ from __future__ import absolute_import, division, print_function import sys import threading +import warnings from tornado.util import raise_exc_info @@ -108,6 +113,8 @@ class StackContext(object): and not necessary in most applications. """ def __init__(self, context_factory): + warnings.warn("StackContext is deprecated and will be removed in Tornado 6.0", + DeprecationWarning) self.context_factory = context_factory self.contexts = [] self.active = True @@ -175,8 +182,20 @@ class ExceptionStackContext(object): If the exception handler returns true, the exception will be consumed and will not be propagated to other exception handlers. + + .. versionadded:: 5.1 + + The ``delay_warning`` argument can be used to delay the emission + of DeprecationWarnings until an exception is caught by the + ``ExceptionStackContext``, which facilitates certain transitional + use cases. """ - def __init__(self, exception_handler): + def __init__(self, exception_handler, delay_warning=False): + self.delay_warning = delay_warning + if not self.delay_warning: + warnings.warn( + "StackContext is deprecated and will be removed in Tornado 6.0", + DeprecationWarning) self.exception_handler = exception_handler self.active = True @@ -185,6 +204,10 @@ class ExceptionStackContext(object): def exit(self, type, value, traceback): if type is not None: + if self.delay_warning: + warnings.warn( + "StackContext is deprecated and will be removed in Tornado 6.0", + DeprecationWarning) return self.exception_handler(type, value, traceback) def __enter__(self): diff --git a/server/www/packages/packages-common/tornado/tcpclient.py b/server/www/packages/packages-common/tornado/tcpclient.py index 33074bd..3a1b58c 100644 --- a/server/www/packages/packages-common/tornado/tcpclient.py +++ b/server/www/packages/packages-common/tornado/tcpclient.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2014 Facebook # @@ -20,12 +19,17 @@ from __future__ import absolute_import, division, print_function import functools import socket +import numbers +import datetime -from tornado.concurrent import Future +from tornado.concurrent import Future, future_add_done_callback from tornado.ioloop import IOLoop from tornado.iostream import IOStream from tornado import gen from tornado.netutil import Resolver +from tornado.platform.auto import set_close_exec +from tornado.gen import TimeoutError +from tornado.util import timedelta_to_seconds _INITIAL_CONNECT_TIMEOUT = 0.3 @@ -47,15 +51,17 @@ class _Connector(object): http://tools.ietf.org/html/rfc6555 """ - def __init__(self, addrinfo, io_loop, connect): - self.io_loop = io_loop + def __init__(self, addrinfo, connect): + self.io_loop = IOLoop.current() self.connect = connect self.future = Future() self.timeout = None + self.connect_timeout = None self.last_error = None self.remaining = len(addrinfo) self.primary_addrs, self.secondary_addrs = self.split(addrinfo) + self.streams = set() @staticmethod def split(addrinfo): @@ -77,9 +83,11 @@ class _Connector(object): secondary.append((af, addr)) return primary, 
secondary - def start(self, timeout=_INITIAL_CONNECT_TIMEOUT): + def start(self, timeout=_INITIAL_CONNECT_TIMEOUT, connect_timeout=None): self.try_connect(iter(self.primary_addrs)) - self.set_timout(timeout) + self.set_timeout(timeout) + if connect_timeout is not None: + self.set_connect_timeout(connect_timeout) return self.future def try_connect(self, addrs): @@ -93,9 +101,10 @@ class _Connector(object): self.future.set_exception(self.last_error or IOError("connection failed")) return - future = self.connect(af, addr) - future.add_done_callback(functools.partial(self.on_connect_done, - addrs, af, addr)) + stream, future = self.connect(af, addr) + self.streams.add(stream) + future_add_done_callback( + future, functools.partial(self.on_connect_done, addrs, af, addr)) def on_connect_done(self, addrs, af, addr, future): self.remaining -= 1 @@ -114,39 +123,60 @@ class _Connector(object): self.io_loop.remove_timeout(self.timeout) self.on_timeout() return - self.clear_timeout() + self.clear_timeouts() if self.future.done(): # This is a late arrival; just drop it. stream.close() else: + self.streams.discard(stream) self.future.set_result((af, addr, stream)) + self.close_streams() - def set_timout(self, timeout): + def set_timeout(self, timeout): self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, self.on_timeout) def on_timeout(self): self.timeout = None - self.try_connect(iter(self.secondary_addrs)) + if not self.future.done(): + self.try_connect(iter(self.secondary_addrs)) def clear_timeout(self): if self.timeout is not None: self.io_loop.remove_timeout(self.timeout) + def set_connect_timeout(self, connect_timeout): + self.connect_timeout = self.io_loop.add_timeout( + connect_timeout, self.on_connect_timeout) + + def on_connect_timeout(self): + if not self.future.done(): + self.future.set_exception(TimeoutError()) + self.close_streams() + + def clear_timeouts(self): + if self.timeout is not None: + self.io_loop.remove_timeout(self.timeout) + if self.connect_timeout is not None: + self.io_loop.remove_timeout(self.connect_timeout) + + def close_streams(self): + for stream in self.streams: + stream.close() + class TCPClient(object): """A non-blocking TCP connection factory. - .. versionchanged:: 4.1 - The ``io_loop`` argument is deprecated. + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. """ - def __init__(self, resolver=None, io_loop=None): - self.io_loop = io_loop or IOLoop.current() + def __init__(self, resolver=None): if resolver is not None: self.resolver = resolver self._own_resolver = False else: - self.resolver = Resolver(io_loop=io_loop) + self.resolver = Resolver() self._own_resolver = True def close(self): @@ -155,7 +185,8 @@ class TCPClient(object): @gen.coroutine def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None, - max_buffer_size=None, source_ip=None, source_port=None): + max_buffer_size=None, source_ip=None, source_port=None, + timeout=None): """Connect to the given host and port. Asynchronously returns an `.IOStream` (or `.SSLIOStream` if @@ -167,25 +198,48 @@ class TCPClient(object): use a specific interface, it has to be handled outside of Tornado as this depends very much on the platform. + Raises `TimeoutError` if the input future does not complete before + ``timeout``, which may be specified in any form allowed by + `.IOLoop.add_timeout` (i.e. 
a `datetime.timedelta` or an absolute time + relative to `.IOLoop.time`) + Similarly, when the user requires a certain source port, it can be specified using the ``source_port`` arg. .. versionchanged:: 4.5 Added the ``source_ip`` and ``source_port`` arguments. + + .. versionchanged:: 5.0 + Added the ``timeout`` argument. """ - addrinfo = yield self.resolver.resolve(host, port, af) + if timeout is not None: + if isinstance(timeout, numbers.Real): + timeout = IOLoop.current().time() + timeout + elif isinstance(timeout, datetime.timedelta): + timeout = IOLoop.current().time() + timedelta_to_seconds(timeout) + else: + raise TypeError("Unsupported timeout %r" % timeout) + if timeout is not None: + addrinfo = yield gen.with_timeout( + timeout, self.resolver.resolve(host, port, af)) + else: + addrinfo = yield self.resolver.resolve(host, port, af) connector = _Connector( - addrinfo, self.io_loop, + addrinfo, functools.partial(self._create_stream, max_buffer_size, source_ip=source_ip, source_port=source_port) ) - af, addr, stream = yield connector.start() + af, addr, stream = yield connector.start(connect_timeout=timeout) # TODO: For better performance we could cache the (af, addr) # information here and re-use it on subsequent connections to # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2) if ssl_options is not None: - stream = yield stream.start_tls(False, ssl_options=ssl_options, - server_hostname=host) + if timeout is not None: + stream = yield gen.with_timeout(timeout, stream.start_tls( + False, ssl_options=ssl_options, server_hostname=host)) + else: + stream = yield stream.start_tls(False, ssl_options=ssl_options, + server_hostname=host) raise gen.Return(stream) def _create_stream(self, max_buffer_size, af, addr, source_ip=None, @@ -202,6 +256,7 @@ class TCPClient(object): # - 127.0.0.1 for IPv4 # - ::1 for IPv6 socket_obj = socket.socket(af) + set_close_exec(socket_obj.fileno()) if source_port_bind or source_ip_bind: # If the user requires binding also to a specific IP/port. try: @@ -212,11 +267,10 @@ class TCPClient(object): raise try: stream = IOStream(socket_obj, - io_loop=self.io_loop, max_buffer_size=max_buffer_size) except socket.error as e: fu = Future() fu.set_exception(e) return fu else: - return stream.connect(addr) + return stream, stream.connect(addr) diff --git a/server/www/packages/packages-common/tornado/tcpserver.py b/server/www/packages/packages-common/tornado/tcpserver.py index f47ec89..4f5d6f0 100644 --- a/server/www/packages/packages-common/tornado/tcpserver.py +++ b/server/www/packages/packages-common/tornado/tcpserver.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2011 Facebook # @@ -47,12 +46,11 @@ class TCPServer(object): from tornado import gen class EchoServer(TCPServer): - @gen.coroutine - def handle_stream(self, stream, address): + async def handle_stream(self, stream, address): while True: try: - data = yield stream.read_until(b"\n") - yield stream.write(data) + data = await stream.read_until(b"\n") + await stream.write(data) except StreamClosedError: break @@ -102,12 +100,15 @@ class TCPServer(object): .. versionadded:: 3.1 The ``max_buffer_size`` argument. + + .. versionchanged:: 5.0 + The ``io_loop`` argument has been removed. 
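+
+    For example, a minimal single-process startup sketch (reusing the
+    ``EchoServer`` subclass above; the port number is arbitrary)::
+
+        from tornado.ioloop import IOLoop
+
+        server = EchoServer()
+        server.listen(8888)
+        IOLoop.current().start()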
""" - def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None, + def __init__(self, ssl_options=None, max_buffer_size=None, read_chunk_size=None): - self.io_loop = io_loop self.ssl_options = ssl_options - self._sockets = {} # fd -> socket object + self._sockets = {} # fd -> socket object + self._handlers = {} # fd -> remove_handler callable self._pending_sockets = [] self._started = False self._stopped = False @@ -151,13 +152,10 @@ class TCPServer(object): method and `tornado.process.fork_processes` to provide greater control over the initialization of a multi-process server. """ - if self.io_loop is None: - self.io_loop = IOLoop.current() - for sock in sockets: self._sockets[sock.fileno()] = sock - add_accept_handler(sock, self._handle_connection, - io_loop=self.io_loop) + self._handlers[sock.fileno()] = add_accept_handler( + sock, self._handle_connection) def add_socket(self, socket): """Singular version of `add_sockets`. Takes a single socket object.""" @@ -234,7 +232,8 @@ class TCPServer(object): self._stopped = True for fd, sock in self._sockets.items(): assert sock.fileno() == fd - self.io_loop.remove_handler(fd) + # Unregister socket from IOLoop + self._handlers.pop(fd)() sock.close() def handle_stream(self, stream, address): @@ -284,17 +283,17 @@ class TCPServer(object): raise try: if self.ssl_options is not None: - stream = SSLIOStream(connection, io_loop=self.io_loop, + stream = SSLIOStream(connection, max_buffer_size=self.max_buffer_size, read_chunk_size=self.read_chunk_size) else: - stream = IOStream(connection, io_loop=self.io_loop, + stream = IOStream(connection, max_buffer_size=self.max_buffer_size, read_chunk_size=self.read_chunk_size) future = self.handle_stream(stream, address) if future is not None: - self.io_loop.add_future(gen.convert_yielded(future), - lambda f: f.result()) + IOLoop.current().add_future(gen.convert_yielded(future), + lambda f: f.result()) except Exception: app_log.error("Error in connection callback", exc_info=True) diff --git a/server/www/packages/packages-common/tornado/template.py b/server/www/packages/packages-common/tornado/template.py index 3b2fa3f..61b9874 100644 --- a/server/www/packages/packages-common/tornado/template.py +++ b/server/www/packages/packages-common/tornado/template.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -260,9 +259,8 @@ class Template(object): :arg str template_string: the contents of the template file. :arg str name: the filename from which the template was loaded (used for error message). - :arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible for this template, - used to resolve ``{% include %}`` and ``{% extend %}`` - directives. + :arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible + for this template, used to resolve ``{% include %}`` and ``{% extend %}`` directives. :arg bool compress_whitespace: Deprecated since Tornado 4.3. Equivalent to ``whitespace="single"`` if true and ``whitespace="all"`` if false. diff --git a/server/www/packages/packages-common/tornado/testing.py b/server/www/packages/packages-common/tornado/testing.py index 74d04b6..d6e5e94 100644 --- a/server/www/packages/packages-common/tornado/testing.py +++ b/server/www/packages/packages-common/tornado/testing.py @@ -1,10 +1,9 @@ -#!/usr/bin/env python """Support classes for automated testing. 
* `AsyncTestCase` and `AsyncHTTPTestCase`: Subclasses of unittest.TestCase with additional support for testing asynchronous (`.IOLoop`-based) code. -* `ExpectLog` and `LogTrapTestCase`: Make test logs less spammy. +* `ExpectLog`: Make test logs less spammy. * `main()`: A simple test runner (wrapper around unittest.main()) with support for the tornado.autoreload module to rerun the tests when code changes. @@ -22,7 +21,7 @@ try: from tornado.process import Subprocess except ImportError: # These modules are not importable on app engine. Parts of this module - # won't work, but e.g. LogTrapTestCase and main() will. + # won't work, but e.g. main() will. AsyncHTTPClient = None # type: ignore gen = None # type: ignore HTTPServer = None # type: ignore @@ -30,7 +29,7 @@ except ImportError: netutil = None # type: ignore SimpleAsyncHTTPClient = None # type: ignore Subprocess = None # type: ignore -from tornado.log import gen_log, app_log +from tornado.log import app_log from tornado.stack_context import ExceptionStackContext from tornado.util import raise_exc_info, basestring_type, PY3 import functools @@ -42,10 +41,11 @@ import signal import socket import sys -if PY3: - from io import StringIO -else: - from cStringIO import StringIO +try: + import asyncio +except ImportError: + asyncio = None + try: from collections.abc import Generator as GeneratorType # type: ignore @@ -73,23 +73,12 @@ else: except ImportError: import unittest # type: ignore -_next_port = 10000 - -def get_unused_port(): - """Returns a (hopefully) unused port number. - - This function does not guarantee that the port it returns is available, - only that a series of get_unused_port calls in a single process return - distinct ports. - - .. deprecated:: - Use bind_unused_port instead, which is guaranteed to find an unused port. - """ - global _next_port - port = _next_port - _next_port = _next_port + 1 - return port +if asyncio is None: + _NON_OWNED_IOLOOPS = () +else: + import tornado.platform.asyncio + _NON_OWNED_IOLOOPS = tornado.platform.asyncio.AsyncIOMainLoop def bind_unused_port(reuse_port=False): @@ -155,19 +144,19 @@ class AsyncTestCase(unittest.TestCase): asynchronous code. The unittest framework is synchronous, so the test must be - complete by the time the test method returns. This means that - asynchronous code cannot be used in quite the same way as usual. - To write test functions that use the same ``yield``-based patterns - used with the `tornado.gen` module, decorate your test methods - with `tornado.testing.gen_test` instead of - `tornado.gen.coroutine`. This class also provides the `stop()` - and `wait()` methods for a more manual style of testing. The test - method itself must call ``self.wait()``, and asynchronous - callbacks should call ``self.stop()`` to signal completion. + complete by the time the test method returns. This means that + asynchronous code cannot be used in quite the same way as usual + and must be adapted to fit. To write your tests with coroutines, + decorate your test methods with `tornado.testing.gen_test` instead + of `tornado.gen.coroutine`. + + This class also provides the (deprecated) `stop()` and `wait()` + methods for a more manual style of testing. The test method itself + must call ``self.wait()``, and asynchronous callbacks should call + ``self.stop()`` to signal completion. By default, a new `.IOLoop` is constructed for each test and is available - as ``self.io_loop``. This `.IOLoop` should be used in the construction of - HTTP clients/servers, etc. 
If the code being tested requires a + as ``self.io_loop``. If the code being tested requires a global `.IOLoop`, subclasses should override `get_new_ioloop` to return it. The `.IOLoop`'s ``start`` and ``stop`` methods should not be @@ -182,7 +171,7 @@ class AsyncTestCase(unittest.TestCase): class MyTestCase(AsyncTestCase): @tornado.testing.gen_test def test_http_fetch(self): - client = AsyncHTTPClient(self.io_loop) + client = AsyncHTTPClient() response = yield client.fetch("http://www.tornadoweb.org") # Test contents of response self.assertIn("FriendFeed", response.body) @@ -190,27 +179,11 @@ class AsyncTestCase(unittest.TestCase): # This test uses argument passing between self.stop and self.wait. class MyTestCase2(AsyncTestCase): def test_http_fetch(self): - client = AsyncHTTPClient(self.io_loop) + client = AsyncHTTPClient() client.fetch("http://www.tornadoweb.org/", self.stop) response = self.wait() # Test contents of response self.assertIn("FriendFeed", response.body) - - # This test uses an explicit callback-based style. - class MyTestCase3(AsyncTestCase): - def test_http_fetch(self): - client = AsyncHTTPClient(self.io_loop) - client.fetch("http://www.tornadoweb.org/", self.handle_fetch) - self.wait() - - def handle_fetch(self, response): - # Test contents of response (failures and exceptions here - # will cause self.wait() to throw an exception and end the - # test). - # Exceptions thrown here are magically propagated to - # self.wait() in test_http_fetch() via stack_context. - self.assertIn("FriendFeed", response.body) - self.stop() """ def __init__(self, methodName='runTest'): super(AsyncTestCase, self).__init__(methodName) @@ -235,8 +208,7 @@ class AsyncTestCase(unittest.TestCase): # Clean up Subprocess, so it can be used again with a new ioloop. Subprocess.uninitialize() self.io_loop.clear_current() - if (not IOLoop.initialized() or - self.io_loop is not IOLoop.instance()): + if not isinstance(self.io_loop, _NON_OWNED_IOLOOPS): # Try to clean up any file descriptors left open in the ioloop. # This avoids leaks, especially when tests are run repeatedly # in the same process with autoreload (because curl does not @@ -250,9 +222,15 @@ class AsyncTestCase(unittest.TestCase): self.__rethrow() def get_new_ioloop(self): - """Creates a new `.IOLoop` for this test. May be overridden in - subclasses for tests that require a specific `.IOLoop` (usually - the singleton `.IOLoop.instance()`). + """Returns the `.IOLoop` to use for this test. + + By default, a new `.IOLoop` is created for each test. + Subclasses may override this method to return + `.IOLoop.current()` if it is not appropriate to use a new + `.IOLoop` in each test (for example, if there are global + singletons using the default `.IOLoop`) or if a per-test event + loop is being provided by another system (such as + ``pytest-asyncio``). """ return IOLoop() @@ -272,7 +250,7 @@ class AsyncTestCase(unittest.TestCase): raise_exc_info(failure) def run(self, result=None): - with ExceptionStackContext(self._handle_exception): + with ExceptionStackContext(self._handle_exception, delay_warning=True): super(AsyncTestCase, self).run(result) # As a last resort, if an exception escaped super.run() and wasn't # re-raised in tearDown, raise it here. This will cause the @@ -286,6 +264,10 @@ class AsyncTestCase(unittest.TestCase): Keyword arguments or a single positional argument passed to `stop()` are saved and will be returned by `wait()`. + + .. deprecated:: 5.1 + + `stop` and `wait` are deprecated; use ``@gen_test`` instead.
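+
+        As a rough sketch, a ``stop``/``wait`` test such as::
+
+            def test_callback(self):
+                self.io_loop.add_callback(self.stop)
+                self.wait()
+
+        can be rewritten as a coroutine (``gen.moment`` stands in here
+        for whatever asynchronous work the test performs)::
+
+            @gen_test
+            def test_callback(self):
+                yield gen.moment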
""" assert _arg is None or not kwargs self.__stop_args = kwargs or _arg @@ -307,6 +289,10 @@ class AsyncTestCase(unittest.TestCase): .. versionchanged:: 3.1 Added the ``ASYNC_TEST_TIMEOUT`` environment variable. + + .. deprecated:: 5.1 + + `stop` and `wait` are deprecated; use ``@gen_test`` instead. """ if timeout is None: timeout = get_async_test_timeout() @@ -321,7 +307,8 @@ class AsyncTestCase(unittest.TestCase): except Exception: self.__failure = sys.exc_info() self.stop() - self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, timeout_func) + self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, + timeout_func) while True: self.__running = True self.io_loop.start() @@ -382,11 +369,10 @@ class AsyncHTTPTestCase(AsyncTestCase): self.http_server.add_sockets([sock]) def get_http_client(self): - return AsyncHTTPClient(io_loop=self.io_loop) + return AsyncHTTPClient() def get_http_server(self): - return HTTPServer(self._app, io_loop=self.io_loop, - **self.get_httpserver_options()) + return HTTPServer(self._app, **self.get_httpserver_options()) def get_app(self): """Should be overridden by subclasses to return a @@ -394,16 +380,47 @@ class AsyncHTTPTestCase(AsyncTestCase): """ raise NotImplementedError() - def fetch(self, path, **kwargs): - """Convenience method to synchronously fetch a url. + def fetch(self, path, raise_error=False, **kwargs): + """Convenience method to synchronously fetch a URL. The given path will be appended to the local server's host and port. Any additional kwargs will be passed directly to `.AsyncHTTPClient.fetch` (and so could be used to pass ``method="POST"``, ``body="..."``, etc). + + If the path begins with http:// or https://, it will be treated as a + full URL and will be fetched as-is. + + If ``raise_error`` is True, a `tornado.httpclient.HTTPError` will + be raised if the response code is not 200. This is the same behavior + as the ``raise_error`` argument to `.AsyncHTTPClient.fetch`, but + the default is False here (it's True in `.AsyncHTTPClient`) because + tests often need to deal with non-200 response codes. + + .. versionchanged:: 5.0 + Added support for absolute URLs. + + .. versionchanged:: 5.1 + + Added the ``raise_error`` argument. + + .. deprecated:: 5.1 + + This method currently turns any exception into an + `.HTTPResponse` with status code 599. In Tornado 6.0, + errors other than `tornado.httpclient.HTTPError` will be + passed through, and ``raise_error=False`` will only + suppress errors that would be raised due to non-200 + response codes. 
+ """ - self.http_client.fetch(self.get_url(path), self.stop, **kwargs) - return self.wait() + if path.lower().startswith(('http://', 'https://')): + url = path + else: + url = self.get_url(path) + return self.io_loop.run_sync( + lambda: self.http_client.fetch(url, raise_error=raise_error, **kwargs), + timeout=get_async_test_timeout()) def get_httpserver_options(self): """May be overridden by subclasses to return additional @@ -423,16 +440,14 @@ class AsyncHTTPTestCase(AsyncTestCase): def get_url(self, path): """Returns an absolute url for the given path on the test server.""" - return '%s://localhost:%s%s' % (self.get_protocol(), + return '%s://127.0.0.1:%s%s' % (self.get_protocol(), self.get_http_port(), path) def tearDown(self): self.http_server.stop() self.io_loop.run_sync(self.http_server.close_all_connections, timeout=get_async_test_timeout()) - if (not IOLoop.initialized() or - self.http_client.io_loop is not IOLoop.instance()): - self.http_client.close() + self.http_client.close() super(AsyncHTTPTestCase, self).tearDown() @@ -442,7 +457,7 @@ class AsyncHTTPSTestCase(AsyncHTTPTestCase): Interface is generally the same as `AsyncHTTPTestCase`. """ def get_http_client(self): - return AsyncHTTPClient(io_loop=self.io_loop, force_instance=True, + return AsyncHTTPClient(force_instance=True, defaults=dict(validate_cert=False)) def get_httpserver_options(self): @@ -454,7 +469,8 @@ class AsyncHTTPSTestCase(AsyncHTTPTestCase): By default includes a self-signed testing certificate. """ # Testing keys were generated with: - # openssl req -new -keyout tornado/test/test.key -out tornado/test/test.crt -nodes -days 3650 -x509 + # openssl req -new -keyout tornado/test/test.key \ + # -out tornado/test/test.crt -nodes -days 3650 -x509 module_dir = os.path.dirname(__file__) return dict( certfile=os.path.join(module_dir, 'test', 'test.crt'), @@ -476,7 +492,7 @@ def gen_test(func=None, timeout=None): class MyTest(AsyncHTTPTestCase): @gen_test def test_something(self): - response = yield gen.Task(self.fetch('/')) + response = yield self.http_client.fetch(self.get_url('/')) By default, ``@gen_test`` times out after 5 seconds. The timeout may be overridden globally with the ``ASYNC_TEST_TIMEOUT`` environment variable, @@ -485,7 +501,11 @@ def gen_test(func=None, timeout=None): class MyTest(AsyncHTTPTestCase): @gen_test(timeout=10) def test_something_slow(self): - response = yield gen.Task(self.fetch('/')) + response = yield self.http_client.fetch(self.get_url('/')) + + Note that ``@gen_test`` is incompatible with `AsyncTestCase.stop`, + `AsyncTestCase.wait`, and `AsyncHTTPTestCase.fetch`. Use ``yield + self.http_client.fetch(self.get_url())`` as shown above instead. .. versionadded:: 3.1 The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment @@ -494,6 +514,7 @@ def gen_test(func=None, timeout=None): .. versionchanged:: 4.0 The wrapper now passes along ``*args, **kwargs`` so it can be used on functions with arguments. + """ if timeout is None: timeout = get_async_test_timeout() @@ -529,12 +550,17 @@ def gen_test(func=None, timeout=None): timeout=timeout) except TimeoutError as e: # run_sync raises an error with an unhelpful traceback. - # Throw it back into the generator or coroutine so the stack - # trace is replaced by the point where the test is stopped. - self._test_generator.throw(e) - # In case the test contains an overly broad except clause, - # we may get back here. In this case re-raise the original - # exception, which is better than nothing. 
+ # If the underlying generator is still running, we can throw the + # exception back into it so the stack trace is replaced by the + # point where the test is stopped. The only reason the generator + # would not be running would be if it were cancelled, which means + # a native coroutine, so we can rely on the cr_running attribute. + if getattr(self._test_generator, 'cr_running', True): + self._test_generator.throw(e) + # In case the test contains an overly broad except + # clause, we may get back here. + # Coroutine was stopped or didn't raise a useful stack trace, + # so re-raise the original exception which is better than nothing. raise return post_coroutine @@ -554,49 +580,6 @@ def gen_test(func=None, timeout=None): gen_test.__test__ = False # type: ignore -class LogTrapTestCase(unittest.TestCase): - """A test case that captures and discards all logging output - if the test passes. - - Some libraries can produce a lot of logging output even when - the test succeeds, so this class can be useful to minimize the noise. - Simply use it as a base class for your test case. It is safe to combine - with AsyncTestCase via multiple inheritance - (``class MyTestCase(AsyncHTTPTestCase, LogTrapTestCase):``) - - This class assumes that only one log handler is configured and - that it is a `~logging.StreamHandler`. This is true for both - `logging.basicConfig` and the "pretty logging" configured by - `tornado.options`. It is not compatible with other log buffering - mechanisms, such as those provided by some test runners. - - .. deprecated:: 4.1 - Use the unittest module's ``--buffer`` option instead, or `.ExpectLog`. - """ - def run(self, result=None): - logger = logging.getLogger() - if not logger.handlers: - logging.basicConfig() - handler = logger.handlers[0] - if (len(logger.handlers) > 1 or - not isinstance(handler, logging.StreamHandler)): - # Logging has been configured in a way we don't recognize, - # so just leave it alone. - super(LogTrapTestCase, self).run(result) - return - old_stream = handler.stream - try: - handler.stream = StringIO() - gen_log.info("RUNNING TEST: " + str(self)) - old_error_count = len(result.failures) + len(result.errors) - super(LogTrapTestCase, self).run(result) - new_error_count = len(result.failures) + len(result.errors) - if new_error_count != old_error_count: - old_stream.write(handler.stream.getvalue()) - finally: - handler.stream = old_stream - - class ExpectLog(logging.Filter): """Context manager to capture and suppress expected log output. @@ -684,6 +667,12 @@ def main(**kwargs): to show many test details as they are run. See http://docs.python.org/library/unittest.html#unittest.main for full argument list. + + .. versionchanged:: 5.0 + + This function produces no output of its own; only that produced + by the `unittest` module (Previously it would add a PASS or FAIL + log message). """ from tornado.options import define, options, parse_command_line @@ -719,23 +708,16 @@ def main(**kwargs): if __name__ == '__main__' and len(argv) == 1: print("No tests specified", file=sys.stderr) sys.exit(1) - try: - # In order to be able to run tests by their fully-qualified name - # on the command line without importing all tests here, - # module must be set to None. Python 3.2's unittest.main ignores - # defaultTest if no module is given (it tries to do its own - # test discovery, which is incompatible with auto2to3), so don't - # set module if we're not asking for a specific test. 
- if len(argv) > 1: - unittest.main(module=None, argv=argv, **kwargs) - else: - unittest.main(defaultTest="all", argv=argv, **kwargs) - except SystemExit as e: - if e.code == 0: - gen_log.info('PASS') - else: - gen_log.error('FAIL') - raise + # In order to be able to run tests by their fully-qualified name + # on the command line without importing all tests here, + # module must be set to None. Python 3.2's unittest.main ignores + # defaultTest if no module is given (it tries to do its own + # test discovery, which is incompatible with auto2to3), so don't + # set module if we're not asking for a specific test. + if len(argv) > 1: + unittest.main(module=None, argv=argv, **kwargs) + else: + unittest.main(defaultTest="all", argv=argv, **kwargs) if __name__ == '__main__': diff --git a/server/www/packages/packages-common/tornado/util.py b/server/www/packages/packages-common/tornado/util.py index 981b94c..a42ebeb 100644 --- a/server/www/packages/packages-common/tornado/util.py +++ b/server/www/packages/packages-common/tornado/util.py @@ -84,6 +84,16 @@ except ImportError: is_finalizing = _get_emulated_is_finalizing() +class TimeoutError(Exception): + """Exception raised by `.with_timeout` and `.IOLoop.run_sync`. + + .. versionchanged:: 5.0: + Unified ``tornado.gen.TimeoutError`` and + ``tornado.ioloop.TimeoutError`` as ``tornado.util.TimeoutError``. + Both former names remain as aliases. + """ + + class ObjectDict(_ObjectDictBase): """Makes a dictionary behave like an object, with attribute-style access. """ @@ -272,6 +282,12 @@ class Configurable(object): Configurable subclasses must define the class methods `configurable_base` and `configurable_default`, and use the instance method `initialize` instead of ``__init__``. + + .. versionchanged:: 5.0 + + It is now possible for configuration to be specified at + multiple levels of a class hierarchy. + """ __impl_class = None # type: type __impl_kwargs = None # type: Dict[str, Any] @@ -286,6 +302,9 @@ class Configurable(object): else: impl = cls init_kwargs.update(kwargs) + if impl.configurable_base() is not base: + # The impl class is itself configurable, so recurse. + return impl(*args, **init_kwargs) instance = super(Configurable, cls).__new__(impl) # initialize vs __init__ chosen for compatibility with AsyncHTTPClient # singleton magic. If we get rid of that we can switch to __init__ @@ -343,7 +362,10 @@ class Configurable(object): # type: () -> type """Returns the currently configured class.""" base = cls.configurable_base() - if cls.__impl_class is None: + # Manually mangle the private name to see whether this base + # has been configured (and not another base higher in the + # hierarchy). + if base.__dict__.get('_Configurable__impl_class') is None: base.__impl_class = cls.configurable_default() return base.__impl_class diff --git a/server/www/packages/packages-common/tornado/web.py b/server/www/packages/packages-common/tornado/web.py index d79889f..6760b0b 100644 --- a/server/www/packages/packages-common/tornado/web.py +++ b/server/www/packages/packages-common/tornado/web.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -47,12 +46,14 @@ Thread-safety notes ------------------- In general, methods on `RequestHandler` and elsewhere in Tornado are -not thread-safe. In particular, methods such as +not thread-safe. In particular, methods such as `~RequestHandler.write()`, `~RequestHandler.finish()`, and -`~RequestHandler.flush()` must only be called from the main thread. 
If +`~RequestHandler.flush()` must only be called from the main thread. If you use multiple threads it is important to use `.IOLoop.add_callback` to transfer control back to the main thread before finishing the -request. +request, or to limit your use of other threads to +`.IOLoop.run_in_executor` and ensure that your callbacks running in +the executor do not refer to Tornado objects. """ @@ -77,10 +78,11 @@ import time import tornado import traceback import types +import warnings from inspect import isclass from io import BytesIO -from tornado.concurrent import Future +from tornado.concurrent import Future, future_set_result_unless_cancelled from tornado import escape from tornado import gen from tornado import httputil @@ -245,7 +247,7 @@ class RequestHandler(object): of the request method. Asynchronous support: Decorate this method with `.gen.coroutine` - or `.return_future` to make it asynchronous (the + or use ``async def`` to make it asynchronous (the `asynchronous` decorator cannot be used on `prepare`). If this method returns a `.Future` execution will not proceed until the `.Future` is done. @@ -309,20 +311,21 @@ class RequestHandler(object): def set_status(self, status_code, reason=None): """Sets the status code for our response. - :arg int status_code: Response status code. If ``reason`` is ``None``, - it must be present in `httplib.responses `. - :arg string reason: Human-readable reason phrase describing the status + :arg int status_code: Response status code. + :arg str reason: Human-readable reason phrase describing the status code. If ``None``, it will be filled in from - `httplib.responses `. + `http.client.responses` or "Unknown". + + .. versionchanged:: 5.0 + + No longer validates that the response code is in + `http.client.responses`. """ self._status_code = status_code if reason is not None: self._reason = escape.native_str(reason) else: - try: - self._reason = httputil.responses[status_code] - except KeyError: - raise ValueError("unknown status code %d" % status_code) + self._reason = httputil.responses.get(status_code, "Unknown") def get_status(self): """Returns the status code for our response.""" @@ -521,18 +524,32 @@ class RequestHandler(object): return self.request.cookies def get_cookie(self, name, default=None): - """Gets the value of the cookie with the given name, else default.""" + """Returns the value of the request cookie with the given name. + + If the named cookie is not present, returns ``default``. + + This method only returns cookies that were present in the request. + It does not see the outgoing cookies set by `set_cookie` in this + handler. + """ if self.request.cookies is not None and name in self.request.cookies: return self.request.cookies[name].value return default def set_cookie(self, name, value, domain=None, expires=None, path="/", expires_days=None, **kwargs): - """Sets the given cookie name/value with the given options. + """Sets an outgoing cookie name/value with the given options. - Additional keyword arguments are set on the Cookie.Morsel + Newly-set cookies are not immediately visible via `get_cookie`; + they are not present until the next request. + + expires may be a numeric timestamp as returned by `time.time`, + a time tuple as returned by `time.gmtime`, or a + `datetime.datetime` object. + + Additional keyword arguments are set on the cookies.Morsel directly. - See https://docs.python.org/2/library/cookie.html#Cookie.Morsel + See https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel for available attributes. 
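+
+        For example (a sketch; ``secure`` and ``httponly`` are standard
+        Morsel attributes passed through the keyword arguments)::
+
+            self.set_cookie("session_id", "abc123", expires_days=30,
+                            secure=True, httponly=True)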
""" # The cookie library only accepts type str, in both python 2 and 3 @@ -574,6 +591,9 @@ class RequestHandler(object): path and domain to clear a cookie as were used when that cookie was set (but there is no way to find out on the server side which values were used for a given cookie). + + Similar to `set_cookie`, the effect of this method will not be + seen until the following request. """ expires = datetime.datetime.utcnow() - datetime.timedelta(days=365) self.set_cookie(name, value="", path=path, expires=expires, @@ -585,6 +605,9 @@ class RequestHandler(object): See `clear_cookie` for more information on the path and domain parameters. + Similar to `set_cookie`, the effect of this method will not be + seen until the following request. + .. versionchanged:: 3.2 Added the ``path`` and ``domain`` parameters. @@ -609,6 +632,9 @@ class RequestHandler(object): Secure cookies may contain arbitrary byte values, not just unicode strings (unlike regular cookies) + Similar to `set_cookie`, the effect of this method will not be + seen until the following request. + .. versionchanged:: 3.2.1 Added the ``version`` argument. Introduced cookie version 2 @@ -648,6 +674,10 @@ class RequestHandler(object): The decoded cookie value is returned as a byte string (unlike `get_cookie`). + Similar to `get_cookie`, this method only returns cookies that + were present in the request. It does not see outgoing cookies set by + `set_secure_cookie` in this handler. + .. versionchanged:: 3.2.1 Added the ``min_version`` argument. Introduced cookie version 2; @@ -709,7 +739,8 @@ class RequestHandler(object): if not isinstance(chunk, (bytes, unicode_type, dict)): message = "write() only accepts bytes, unicode, and dict objects" if isinstance(chunk, list): - message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write" + message += ". Lists not accepted for security reasons; see " + \ + "http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write" raise TypeError(message) if isinstance(chunk, dict): chunk = escape.json_encode(chunk) @@ -718,7 +749,18 @@ class RequestHandler(object): self._write_buffer.append(chunk) def render(self, template_name, **kwargs): - """Renders the template with the given arguments as the response.""" + """Renders the template with the given arguments as the response. + + ``render()`` calls ``finish()``, so no other output methods can be called + after it. + + Returns a `.Future` with the same semantics as the one returned by `finish`. + Awaiting this `.Future` is optional. + + .. versionchanged:: 5.1 + + Now returns a `.Future` instead of ``None``. + """ if self._finished: raise RuntimeError("Cannot render() after finish()") html = self.render_string(template_name, **kwargs) @@ -779,7 +821,7 @@ class RequestHandler(object): if html_bodies: hloc = html.index(b'') html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:] - self.finish(html) + return self.finish(html) def render_linked_js(self, js_files): """Default method used to render the final js links for the @@ -919,6 +961,11 @@ class RequestHandler(object): .. versionchanged:: 4.0 Now returns a `.Future` if no callback is given. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in + Tornado 6.0. 
""" chunk = b"".join(self._write_buffer) self._write_buffer = [] @@ -957,7 +1004,20 @@ class RequestHandler(object): return future def finish(self, chunk=None): - """Finishes this response, ending the HTTP request.""" + """Finishes this response, ending the HTTP request. + + Passing a ``chunk`` to ``finish()`` is equivalent to passing that + chunk to ``write()`` and then calling ``finish()`` with no arguments. + + Returns a `.Future` which may optionally be awaited to track the sending + of the response to the client. This `.Future` resolves when all the response + data has been sent, and raises an error if the connection is closed before all + data can be sent. + + .. versionchanged:: 5.1 + + Now returns a `.Future` instead of ``None``. + """ if self._finished: raise RuntimeError("finish() called twice") @@ -974,7 +1034,8 @@ class RequestHandler(object): if self.check_etag_header(): self._write_buffer = [] self.set_status(304) - if self._status_code in (204, 304): + if (self._status_code in (204, 304) or + (self._status_code >= 100 and self._status_code < 200)): assert not self._write_buffer, "Cannot send body with %s" % self._status_code self._clear_headers_for_304() elif "Content-Length" not in self._headers: @@ -988,12 +1049,27 @@ class RequestHandler(object): # are keepalive connections) self.request.connection.set_close_callback(None) - self.flush(include_footers=True) - self.request.finish() + future = self.flush(include_footers=True) + self.request.connection.finish() self._log() self._finished = True self.on_finish() self._break_cycles() + return future + + def detach(self): + """Take control of the underlying stream. + + Returns the underlying `.IOStream` object and stops all + further HTTP processing. Intended for implementing protocols + like websockets that tunnel over an HTTP handshake. + + This method is only supported when HTTP/1.1 is used. + + .. versionadded:: 5.1 + """ + self._finished = True + return self.request.connection.detach() def _break_cycles(self): # Break up a reference cycle between this handler and the @@ -1195,6 +1271,11 @@ class RequestHandler(object): See http://en.wikipedia.org/wiki/Cross-site_request_forgery + This property is of type `bytes`, but it contains only ASCII + characters. If a character string is required, there is no + need to base64-encode it; just decode the byte string as + UTF-8. + .. versionchanged:: 3.2.2 The xsrf token will now be have a random mask applied in every request, which makes it safe to include the token in pages @@ -1491,7 +1572,7 @@ class RequestHandler(object): if self._prepared_future is not None: # Tell the Application we've finished with prepare() # and are ready for the body to arrive. - self._prepared_future.set_result(None) + future_set_result_unless_cancelled(self._prepared_future, None) if self._finished: return @@ -1516,6 +1597,9 @@ class RequestHandler(object): self._handle_request_exception(e) except Exception: app_log.error("Exception in exception handler", exc_info=True) + finally: + # Unset result to avoid circular references + result = None if (self._prepared_future is not None and not self._prepared_future.done()): # In case we failed before setting _prepared_future, do it @@ -1561,11 +1645,7 @@ class RequestHandler(object): # send a response. 
return if isinstance(e, HTTPError): - if e.status_code not in httputil.responses and not e.reason: - gen_log.error("Bad HTTP status code: %d", e.status_code) - self.send_error(500, exc_info=sys.exc_info()) - else: - self.send_error(e.status_code, exc_info=sys.exc_info()) + self.send_error(e.status_code, exc_info=sys.exc_info()) else: self.send_error(500, exc_info=sys.exc_info()) @@ -1657,7 +1737,14 @@ def asynchronous(method): .. versionchanged:: 4.3 Returning anything but ``None`` or a yieldable object from a method decorated with ``@asynchronous`` is an error. Such return values were previously ignored silently. + + .. deprecated:: 5.1 + + This decorator is deprecated and will be removed in Tornado 6.0. + Use coroutines instead. """ + warnings.warn("@asynchronous is deprecated, use coroutines instead", + DeprecationWarning) # Delay the IOLoop import because it's not available on app engine. from tornado.ioloop import IOLoop @@ -1665,7 +1752,7 @@ def asynchronous(method): def wrapper(self, *args, **kwargs): self._auto_finish = False with stack_context.ExceptionStackContext( - self._stack_context_handle_exception): + self._stack_context_handle_exception, delay_warning=True): result = method(self, *args, **kwargs) if result is not None: result = gen.convert_yielded(result) @@ -1711,7 +1798,7 @@ def stream_request_body(cls): See the `file receiver demo `_ for example usage. - """ + """ # noqa: E501 if not issubclass(cls, RequestHandler): raise TypeError("expected subclass of RequestHandler, got %r", cls) cls._stream_request_body = True @@ -1859,6 +1946,17 @@ class Application(ReversibleRouter): If there's no match for the current request's host, then ``default_host`` parameter value is matched against host regular expressions. + + .. warning:: + + Applications that do not use TLS may be vulnerable to :ref:`DNS + rebinding ` attacks. This attack is especially + relevant to applications that only listen on ``127.0.0.1`` or + other private networks. Appropriate host patterns must be used + (instead of the default of ``r'.*'``) to prevent this risk. The + ``default_host`` argument must not be used in applications that + may be vulnerable to DNS rebinding. + You can serve static files by sending the ``static_path`` setting as a keyword argument. We will serve those files from the ``/static/`` URI (this is configurable with the @@ -1869,6 +1967,7 @@ class Application(ReversibleRouter): .. versionchanged:: 4.5 Integration with the new `tornado.routing` module. + """ def __init__(self, handlers=None, default_host=None, transforms=None, **settings): @@ -2089,7 +2188,7 @@ class _HandlerDelegate(httputil.HTTPMessageDelegate): def finish(self): if self.stream_request_body: - self.request.body.set_result(None) + future_set_result_unless_cancelled(self.request.body, None) else: self.request.body = b''.join(self.chunks) self.request._parse_body() @@ -2146,11 +2245,11 @@ class HTTPError(Exception): :arg int status_code: HTTP status code. Must be listed in `httplib.responses ` unless the ``reason`` keyword argument is given. - :arg string log_message: Message to be written to the log for this error + :arg str log_message: Message to be written to the log for this error (will not be shown to the user unless the `Application` is in debug mode). May contain ``%s``-style placeholders, which will be filled in with remaining positional parameters. - :arg string reason: Keyword-only argument. The HTTP "reason" phrase + :arg str reason: Keyword-only argument.
The HTTP "reason" phrase to pass in the status line along with ``status_code``. Normally determined automatically from ``status_code``, but can be used to use a non-standard numeric code. @@ -2256,13 +2355,21 @@ class RedirectHandler(RequestHandler): .. versionchanged:: 4.5 Added support for substitutions into the destination URL. + + .. versionchanged:: 5.0 + If any query arguments are present, they will be copied to the + destination URL. """ def initialize(self, url, permanent=True): self._url = url self._permanent = permanent def get(self, *args): - self.redirect(self._url.format(*args), permanent=self._permanent) + to_url = self._url.format(*args) + if self.request.query_arguments: + to_url = httputil.url_concat( + to_url, list(httputil.qs_to_qsl(self.request.query_arguments))) + self.redirect(to_url, permanent=self._permanent) class StaticFileHandler(RequestHandler): @@ -2467,8 +2574,9 @@ class StaticFileHandler(RequestHandler): .. versionadded:: 3.1 """ - if self.check_etag_header(): - return True + # If client sent If-None-Match, use it, ignore If-Modified-Since + if self.request.headers.get('If-None-Match'): + return self.check_etag_header() # Check the If-Modified-Since, and don't send the result if the # content has not been modified @@ -2773,6 +2881,7 @@ class FallbackHandler(RequestHandler): def prepare(self): self.fallback(self.request) self._finished = True + self.on_finish() class OutputTransform(object): @@ -2786,7 +2895,7 @@ class OutputTransform(object): pass def transform_first_chunk(self, status_code, headers, chunk, finishing): - # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] + # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501 return status_code, headers, chunk def transform_chunk(self, chunk, finishing): @@ -2827,7 +2936,7 @@ class GZipContentEncoding(OutputTransform): return ctype.startswith('text/') or ctype in self.CONTENT_TYPES def transform_first_chunk(self, status_code, headers, chunk, finishing): - # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] + # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501 # TODO: can/should this type be inherited from the superclass? if 'Vary' in headers: headers['Vary'] += ', Accept-Encoding' diff --git a/server/www/packages/packages-common/tornado/websocket.py b/server/www/packages/packages-common/tornado/websocket.py index 69437ee..0b994fc 100644 --- a/server/www/packages/packages-common/tornado/websocket.py +++ b/server/www/packages/packages-common/tornado/websocket.py @@ -17,24 +17,24 @@ the protocol (known as "draft 76") and are not compatible with this module. 
""" from __future__ import absolute_import, division, print_function -# Author: Jacob Kristhammar, 2010 import base64 -import collections import hashlib import os +import sys import struct import tornado.escape import tornado.web import zlib -from tornado.concurrent import TracebackFuture +from tornado.concurrent import Future, future_set_result_unless_cancelled from tornado.escape import utf8, native_str, to_unicode from tornado import gen, httpclient, httputil from tornado.ioloop import IOLoop, PeriodicCallback from tornado.iostream import StreamClosedError -from tornado.log import gen_log, app_log +from tornado.log import gen_log from tornado import simple_httpclient +from tornado.queues import Queue from tornado.tcpclient import TCPClient from tornado.util import _websocket_mask, PY3 @@ -44,6 +44,8 @@ if PY3: else: from urlparse import urlparse # py3 +_default_max_message_size = 10 * 1024 * 1024 + class WebSocketError(Exception): pass @@ -57,6 +59,10 @@ class WebSocketClosedError(WebSocketError): pass +class _DecompressTooLargeError(Exception): + pass + + class WebSocketHandler(tornado.web.RequestHandler): """Subclass this class to create a basic WebSocket handler. @@ -146,7 +152,6 @@ class WebSocketHandler(tornado.web.RequestHandler): self.stream = None self._on_close_called = False - @tornado.web.asynchronous def get(self, *args, **kwargs): self.open_args = args self.open_kwargs = kwargs @@ -226,7 +231,7 @@ class WebSocketHandler(tornado.web.RequestHandler): Default is 10MiB. """ - return self.settings.get('websocket_max_message_size', None) + return self.settings.get('websocket_max_message_size', _default_max_message_size) def write_message(self, message, binary=False): """Sends the given message to the client of this Web Socket. @@ -237,6 +242,7 @@ class WebSocketHandler(tornado.web.RequestHandler): is allowed. If the connection is already closed, raises `WebSocketClosedError`. + Returns a `.Future` which can be used for flow control. .. versionchanged:: 3.2 `WebSocketClosedError` was added (previously a closed connection @@ -244,6 +250,10 @@ class WebSocketHandler(tornado.web.RequestHandler): .. versionchanged:: 4.3 Returns a `.Future` which can be used for flow control. + + .. versionchanged:: 5.0 + Consistently raises `WebSocketClosedError`. Previously could + sometimes raise `.StreamClosedError`. """ if self.ws_connection is None: raise WebSocketClosedError() @@ -252,18 +262,38 @@ class WebSocketHandler(tornado.web.RequestHandler): return self.ws_connection.write_message(message, binary=binary) def select_subprotocol(self, subprotocols): - """Invoked when a new WebSocket requests specific subprotocols. + """Override to implement subprotocol negotiation. ``subprotocols`` is a list of strings identifying the subprotocols proposed by the client. This method may be overridden to return one of those strings to select it, or - ``None`` to not select a subprotocol. Failure to select a - subprotocol does not automatically abort the connection, - although clients may close the connection if none of their - proposed subprotocols was selected. + ``None`` to not select a subprotocol. + + Failure to select a subprotocol does not automatically abort + the connection, although clients may close the connection if + none of their proposed subprotocols was selected. + + The list may be empty, in which case this method must return + None. This method is always called exactly once even if no + subprotocols were proposed so that the handler can be advised + of this fact. + + .. 
versionchanged:: 5.1 + + Previously, this method was called with a list containing + an empty string instead of an empty list if no subprotocols + were proposed by the client. """ return None + @property + def selected_subprotocol(self): + """The subprotocol returned by `select_subprotocol`. + + .. versionadded:: 5.1 + """ + return self.ws_connection.selected_subprotocol + def get_compression_options(self): """Override to return compression options for the connection. @@ -294,6 +324,13 @@ class WebSocketHandler(tornado.web.RequestHandler): The arguments to `open` are extracted from the `tornado.web.URLSpec` regular expression, just like the arguments to `tornado.web.RequestHandler.get`. + + `open` may be a coroutine. `on_message` will not be called until + `open` has returned. + + .. versionchanged:: 5.1 + + ``open`` may be a coroutine. """ pass @@ -308,8 +345,23 @@ class WebSocketHandler(tornado.web.RequestHandler): """ raise NotImplementedError - def ping(self, data): - """Send ping frame to the remote end.""" + def ping(self, data=b''): + """Send ping frame to the remote end. + + The data argument allows a small amount of data (up to 125 + bytes) to be sent as a part of the ping message. Note that not + all websocket implementations expose this data to + applications. + + Consider using the ``websocket_ping_interval`` application + setting instead of sending pings manually. + + .. versionchanged:: 5.1 + + The data argument is now optional. + + """ + data = utf8(data) if self.ws_connection is None: raise WebSocketClosedError() self.ws_connection.write_ping(data) @@ -462,7 +514,7 @@ class WebSocketHandler(tornado.web.RequestHandler): self, compression_options=self.get_compression_options()) def _attach_stream(self): - self.stream = self.request.connection.detach() + self.stream = self.detach() self.stream.set_close_callback(self.on_connection_close) # disable non-WS methods for method in ["write", "redirect", "set_header", "set_cookie", @@ -493,8 +545,7 @@ class WebSocketProtocol(object): try: result = callback(*args, **kwargs) except Exception: - app_log.error("Uncaught exception in %s", - getattr(self.request, 'path', None), exc_info=True) + self.handler.log_exception(*sys.exc_info()) self._abort() else: if result is not None: @@ -539,7 +590,8 @@ class _PerMessageDeflateCompressor(object): self._compressor = None def _create_compressor(self): - return zlib.compressobj(self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level) + return zlib.compressobj(self._compression_level, + zlib.DEFLATED, -self._max_wbits, self._mem_level) def compress(self, data): compressor = self._compressor or self._create_compressor() @@ -550,7 +602,8 @@ class _PerMessageDeflateCompressor(object): class _PerMessageDeflateDecompressor(object): - def __init__(self, persistent, max_wbits, compression_options=None): + def __init__(self, persistent, max_wbits, max_message_size, compression_options=None): + self._max_message_size = max_message_size if max_wbits is None: max_wbits = zlib.MAX_WBITS if not (8 <= max_wbits <= zlib.MAX_WBITS): @@ -567,7 +620,10 @@ class _PerMessageDeflateDecompressor(object): def decompress(self, data): decompressor = self._decompressor or self._create_decompressor() - return decompressor.decompress(data + b'\x00\x00\xff\xff') + result = decompressor.decompress(data + b'\x00\x00\xff\xff', self._max_message_size) + if decompressor.unconsumed_tail: + raise _DecompressTooLargeError() + return result class WebSocketProtocol13(WebSocketProtocol): @@ -616,6 +672,14 @@ class 
WebSocketProtocol13(WebSocketProtocol): def accept_connection(self): try: self._handle_websocket_headers() + except ValueError: + self.handler.set_status(400) + log_msg = "Missing/Invalid WebSocket headers" + self.handler.finish(log_msg) + gen_log.debug(log_msg) + return + + try: self._accept_connection() except ValueError: gen_log.debug("Malformed WebSocket request received", @@ -647,14 +711,17 @@ class WebSocketProtocol13(WebSocketProtocol): return WebSocketProtocol13.compute_accept_value( self.request.headers.get("Sec-Websocket-Key")) + @gen.coroutine def _accept_connection(self): - subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '') - subprotocols = [s.strip() for s in subprotocols.split(',')] - if subprotocols: - selected = self.handler.select_subprotocol(subprotocols) - if selected: - assert selected in subprotocols - self.handler.set_header("Sec-WebSocket-Protocol", selected) + subprotocol_header = self.request.headers.get("Sec-WebSocket-Protocol") + if subprotocol_header: + subprotocols = [s.strip() for s in subprotocol_header.split(',')] + else: + subprotocols = [] + self.selected_subprotocol = self.handler.select_subprotocol(subprotocols) + if self.selected_subprotocol: + assert self.selected_subprotocol in subprotocols + self.handler.set_header("Sec-WebSocket-Protocol", self.selected_subprotocol) extensions = self._parse_extensions_header(self.request.headers) for ext in extensions: @@ -684,9 +751,11 @@ class WebSocketProtocol13(WebSocketProtocol): self.stream = self.handler.stream self.start_pinging() - self._run_callback(self.handler.open, *self.handler.open_args, - **self.handler.open_kwargs) - self._receive_frame() + open_result = self._run_callback(self.handler.open, *self.handler.open_args, + **self.handler.open_kwargs) + if open_result is not None: + yield open_result + yield self._receive_frame_loop() def _parse_extensions_header(self, headers): extensions = headers.get("Sec-WebSocket-Extensions", '') @@ -713,6 +782,8 @@ class WebSocketProtocol13(WebSocketProtocol): else: raise ValueError("unsupported extension %r", ext) + self.selected_subprotocol = headers.get('Sec-WebSocket-Protocol', None) + def _get_compressor_options(self, side, agreed_parameters, compression_options=None): """Converts a websocket agreed_parameters set to keyword arguments for our compressor objects. @@ -740,34 +811,39 @@ class WebSocketProtocol13(WebSocketProtocol): self._compressor = _PerMessageDeflateCompressor( **self._get_compressor_options(side, agreed_parameters, compression_options)) self._decompressor = _PerMessageDeflateDecompressor( + max_message_size=self.handler.max_message_size, **self._get_compressor_options(other_side, agreed_parameters, compression_options)) def _write_frame(self, fin, opcode, data, flags=0): + data_len = len(data) + if opcode & 0x8: + # All control frames MUST have a payload length of 125 + # bytes or less and MUST NOT be fragmented. 
+ if not fin: + raise ValueError("control frames may not be fragmented") + if data_len > 125: + raise ValueError("control frame payloads may not exceed 125 bytes") if fin: finbit = self.FIN else: finbit = 0 frame = struct.pack("B", finbit | opcode | flags) - l = len(data) if self.mask_outgoing: mask_bit = 0x80 else: mask_bit = 0 - if l < 126: - frame += struct.pack("B", l | mask_bit) - elif l <= 0xFFFF: - frame += struct.pack("!BH", 126 | mask_bit, l) + if data_len < 126: + frame += struct.pack("B", data_len | mask_bit) + elif data_len <= 0xFFFF: + frame += struct.pack("!BH", 126 | mask_bit, data_len) else: - frame += struct.pack("!BQ", 127 | mask_bit, l) + frame += struct.pack("!BQ", 127 | mask_bit, data_len) if self.mask_outgoing: mask = os.urandom(4) data = mask + _websocket_mask(mask, data) frame += data self._wire_bytes_out += len(frame) - try: - return self.stream.write(frame) - except StreamClosedError: - self._abort() + return self.stream.write(frame) def write_message(self, message, binary=False): """Sends the given message to the client of this Web Socket.""" @@ -782,118 +858,107 @@ class WebSocketProtocol13(WebSocketProtocol): if self._compressor: message = self._compressor.compress(message) flags |= self.RSV1 - return self._write_frame(True, opcode, message, flags=flags) + # For historical reasons, write methods in Tornado operate in a semi-synchronous + # mode in which awaiting the Future they return is optional (But errors can + # still be raised). This requires us to go through an awkward dance here + # to transform the errors that may be returned while presenting the same + # semi-synchronous interface. + try: + fut = self._write_frame(True, opcode, message, flags=flags) + except StreamClosedError: + raise WebSocketClosedError() + + @gen.coroutine + def wrapper(): + try: + yield fut + except StreamClosedError: + raise WebSocketClosedError() + return wrapper() def write_ping(self, data): """Send ping frame.""" assert isinstance(data, bytes) self._write_frame(True, 0x9, data) - def _receive_frame(self): + @gen.coroutine + def _receive_frame_loop(self): try: - self.stream.read_bytes(2, self._on_frame_start) + while not self.client_terminated: + yield self._receive_frame() except StreamClosedError: self._abort() - def _on_frame_start(self, data): - self._wire_bytes_in += len(data) - header, payloadlen = struct.unpack("BB", data) - self._final_frame = header & self.FIN + def _read_bytes(self, n): + self._wire_bytes_in += n + return self.stream.read_bytes(n) + + @gen.coroutine + def _receive_frame(self): + # Read the frame header. + data = yield self._read_bytes(2) + header, mask_payloadlen = struct.unpack("BB", data) + is_final_frame = header & self.FIN reserved_bits = header & self.RSV_MASK - self._frame_opcode = header & self.OPCODE_MASK - self._frame_opcode_is_control = self._frame_opcode & 0x8 - if self._decompressor is not None and self._frame_opcode != 0: + opcode = header & self.OPCODE_MASK + opcode_is_control = opcode & 0x8 + if self._decompressor is not None and opcode != 0: + # Compression flag is present in the first frame's header, + # but we can't decompress until we have all the frames of + # the message. 
self._frame_compressed = bool(reserved_bits & self.RSV1) reserved_bits &= ~self.RSV1 if reserved_bits: # client is using as-yet-undefined extensions; abort self._abort() return - self._masked_frame = bool(payloadlen & 0x80) - payloadlen = payloadlen & 0x7f - if self._frame_opcode_is_control and payloadlen >= 126: + is_masked = bool(mask_payloadlen & 0x80) + payloadlen = mask_payloadlen & 0x7f + + # Parse and validate the length. + if opcode_is_control and payloadlen >= 126: # control frames must have payload < 126 self._abort() return - try: - if payloadlen < 126: - self._frame_length = payloadlen - if self._masked_frame: - self.stream.read_bytes(4, self._on_masking_key) - else: - self._read_frame_data(False) - elif payloadlen == 126: - self.stream.read_bytes(2, self._on_frame_length_16) - elif payloadlen == 127: - self.stream.read_bytes(8, self._on_frame_length_64) - except StreamClosedError: - self._abort() - - def _read_frame_data(self, masked): - new_len = self._frame_length + if payloadlen < 126: + self._frame_length = payloadlen + elif payloadlen == 126: + data = yield self._read_bytes(2) + payloadlen = struct.unpack("!H", data)[0] + elif payloadlen == 127: + data = yield self._read_bytes(8) + payloadlen = struct.unpack("!Q", data)[0] + new_len = payloadlen if self._fragmented_message_buffer is not None: new_len += len(self._fragmented_message_buffer) - if new_len > (self.handler.max_message_size or 10 * 1024 * 1024): + if new_len > self.handler.max_message_size: self.close(1009, "message too big") + self._abort() return - self.stream.read_bytes( - self._frame_length, - self._on_masked_frame_data if masked else self._on_frame_data) - def _on_frame_length_16(self, data): - self._wire_bytes_in += len(data) - self._frame_length = struct.unpack("!H", data)[0] - try: - if self._masked_frame: - self.stream.read_bytes(4, self._on_masking_key) - else: - self._read_frame_data(False) - except StreamClosedError: - self._abort() + # Read the payload, unmasking if necessary. + if is_masked: + self._frame_mask = yield self._read_bytes(4) + data = yield self._read_bytes(payloadlen) + if is_masked: + data = _websocket_mask(self._frame_mask, data) - def _on_frame_length_64(self, data): - self._wire_bytes_in += len(data) - self._frame_length = struct.unpack("!Q", data)[0] - try: - if self._masked_frame: - self.stream.read_bytes(4, self._on_masking_key) - else: - self._read_frame_data(False) - except StreamClosedError: - self._abort() - - def _on_masking_key(self, data): - self._wire_bytes_in += len(data) - self._frame_mask = data - try: - self._read_frame_data(True) - except StreamClosedError: - self._abort() - - def _on_masked_frame_data(self, data): - # Don't touch _wire_bytes_in; we'll do it in _on_frame_data. - self._on_frame_data(_websocket_mask(self._frame_mask, data)) - - def _on_frame_data(self, data): - handled_future = None - - self._wire_bytes_in += len(data) - if self._frame_opcode_is_control: + # Decide what to do with this frame. 
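+        # (Three cases follow: a control frame, which is handled on its
+        # own and leaves the fragmentation state untouched; opcode 0, a
+        # continuation frame appended to the fragment buffer; and any
+        # other opcode, which begins a new message.)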
+        # Decide what to do with this frame.
+        if opcode_is_control:
             # control frames may be interleaved with a series of fragmented
             # data frames, so control frames must not interact with
             # self._fragmented_*
-            if not self._final_frame:
+            if not is_final_frame:
                 # control frames must not be fragmented
                 self._abort()
                 return
-            opcode = self._frame_opcode
-        elif self._frame_opcode == 0:  # continuation frame
+        elif opcode == 0:  # continuation frame
             if self._fragmented_message_buffer is None:
                 # nothing to continue
                 self._abort()
                 return
             self._fragmented_message_buffer += data
-            if self._final_frame:
+            if is_final_frame:
                 opcode = self._fragmented_message_opcode
                 data = self._fragmented_message_buffer
                 self._fragmented_message_buffer = None
@@ -902,22 +967,14 @@ class WebSocketProtocol13(WebSocketProtocol):
                 # can't start new message until the old one is finished
                 self._abort()
                 return
-            if self._final_frame:
-                opcode = self._frame_opcode
-            else:
-                self._fragmented_message_opcode = self._frame_opcode
+            if not is_final_frame:
+                self._fragmented_message_opcode = opcode
                 self._fragmented_message_buffer = data
 
-        if self._final_frame:
+        if is_final_frame:
             handled_future = self._handle_message(opcode, data)
-
-        if not self.client_terminated:
-            if handled_future:
-                # on_message is a coroutine, process more frames once it's done.
-                handled_future.add_done_callback(
-                    lambda future: self._receive_frame())
-            else:
-                self._receive_frame()
+            if handled_future is not None:
+                yield handled_future
 
     def _handle_message(self, opcode, data):
         """Execute on_message, returning its Future if it is a coroutine."""
@@ -925,7 +982,12 @@ class WebSocketProtocol13(WebSocketProtocol):
             return
 
         if self._frame_compressed:
-            data = self._decompressor.decompress(data)
+            try:
+                data = self._decompressor.decompress(data)
+            except _DecompressTooLargeError:
+                self.close(1009, "message too big after decompression")
+                self._abort()
+                return
 
         if opcode == 0x1:
             # UTF-8 data
@@ -951,7 +1013,10 @@ class WebSocketProtocol13(WebSocketProtocol):
             self.close(self.handler.close_code)
         elif opcode == 0x9:
             # Ping
-            self._write_frame(True, 0xA, data)
+            try:
+                self._write_frame(True, 0xA, data)
+            except StreamClosedError:
+                self._abort()
             self._run_callback(self.handler.on_ping, data)
         elif opcode == 0xA:
             # Pong
@@ -972,7 +1037,10 @@ class WebSocketProtocol13(WebSocketProtocol):
                 close_data = struct.pack('>H', code)
                 if reason is not None:
                     close_data += utf8(reason)
-                self._write_frame(True, 0x8, close_data)
+                try:
+                    self._write_frame(True, 0x8, close_data)
+                except StreamClosedError:
+                    self._abort()
             self.server_terminated = True
             if self.client_terminated:
                 if self._waiting is not None:
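
The continuation-frame logic above buffers fragments until a frame with
the FIN bit arrives; only the first fragment carries the real opcode.
A minimal reassembly sketch over hypothetical `(fin, opcode, payload)`
tuples, mirroring that rule::

    # A text message (opcode 0x1) split across three frames; the
    # continuations use opcode 0x0 and only the last frame sets FIN.
    frames = [(0, 0x1, b"Hel"), (0, 0x0, b"lo, "), (1, 0x0, b"world")]

    buf = opcode = None
    for fin, frame_opcode, payload in frames:
        if frame_opcode != 0x0:   # first fragment: remember the message opcode
            opcode, buf = frame_opcode, payload
        else:                     # continuation: append to the buffer
            buf += payload
        if fin:
            break
    assert (opcode, buf) == (0x1, b"Hello, world")
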
""" - def __init__(self, io_loop, request, on_message_callback=None, + def __init__(self, request, on_message_callback=None, compression_options=None, ping_interval=None, ping_timeout=None, - max_message_size=None): + max_message_size=None, subprotocols=[]): self.compression_options = compression_options - self.connect_future = TracebackFuture() + self.connect_future = Future() self.protocol = None - self.read_future = None - self.read_queue = collections.deque() + self.read_queue = Queue(1) self.key = base64.b64encode(os.urandom(16)) self._on_message_callback = on_message_callback self.close_code = self.close_reason = None @@ -1061,6 +1128,8 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): 'Sec-WebSocket-Key': self.key, 'Sec-WebSocket-Version': '13', }) + if subprotocols is not None: + request.headers['Sec-WebSocket-Protocol'] = ','.join(subprotocols) if self.compression_options is not None: # Always offer to let the server set our max_wbits (and even though # we don't offer it, we will accept a client_no_context_takeover @@ -1070,9 +1139,9 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): request.headers['Sec-WebSocket-Extensions'] = ( 'permessage-deflate; client_max_window_bits') - self.tcp_client = TCPClient(io_loop=io_loop) + self.tcp_client = TCPClient() super(WebSocketClientConnection, self).__init__( - io_loop, None, request, lambda: None, self._on_http_response, + None, request, lambda: None, self._on_http_response, 104857600, self.tcp_client, 65536, 104857600) def close(self, code=None, reason=None): @@ -1115,7 +1184,7 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): self.protocol = self.get_websocket_protocol() self.protocol._process_server_headers(self.key, self.headers) self.protocol.start_pinging() - self.protocol._receive_frame() + IOLoop.current().add_callback(self.protocol._receive_frame_loop) if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) @@ -1129,11 +1198,19 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): # ability to see exceptions. self.final_callback = None - self.connect_future.set_result(self) + future_set_result_unless_cancelled(self.connect_future, self) def write_message(self, message, binary=False): - """Sends a message to the WebSocket server.""" - return self.protocol.write_message(message, binary) + """Sends a message to the WebSocket server. + + If the stream is closed, raises `WebSocketClosedError`. + Returns a `.Future` which can be used for flow control. + + .. versionchanged:: 5.0 + Exception raised on a closed stream changed from `.StreamClosedError` + to `WebSocketClosedError`. + """ + return self.protocol.write_message(message, binary=binary) def read_message(self, callback=None): """Reads a message from the WebSocket server. @@ -1146,12 +1223,8 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): is given it will be called with the future when it is ready. 
""" - assert self.read_future is None - future = TracebackFuture() - if self.read_queue: - future.set_result(self.read_queue.popleft()) - else: - self.read_future = future + + future = self.read_queue.get() if callback is not None: self.io_loop.add_future(future, callback) return future @@ -1159,11 +1232,27 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): def on_message(self, message): if self._on_message_callback: self._on_message_callback(message) - elif self.read_future is not None: - self.read_future.set_result(message) - self.read_future = None else: - self.read_queue.append(message) + return self.read_queue.put(message) + + def ping(self, data=b''): + """Send ping frame to the remote end. + + The data argument allows a small amount of data (up to 125 + bytes) to be sent as a part of the ping message. Note that not + all websocket implementations expose this data to + applications. + + Consider using the ``ping_interval`` argument to + `websocket_connect` instead of sending pings manually. + + .. versionadded:: 5.1 + + """ + data = utf8(data) + if self.protocol is None: + raise WebSocketClosedError() + self.protocol.write_ping(data) def on_pong(self, data): pass @@ -1175,11 +1264,19 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): return WebSocketProtocol13(self, mask_outgoing=True, compression_options=self.compression_options) + @property + def selected_subprotocol(self): + """The subprotocol selected by the server. -def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None, + .. versionadded:: 5.1 + """ + return self.protocol.selected_subprotocol + + +def websocket_connect(url, callback=None, connect_timeout=None, on_message_callback=None, compression_options=None, ping_interval=None, ping_timeout=None, - max_message_size=None): + max_message_size=_default_max_message_size, subprotocols=None): """Client-side websocket support. Takes a url and returns a Future whose result is a @@ -1202,19 +1299,27 @@ def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None, ``websocket_connect``. In both styles, a message of ``None`` indicates that the connection has been closed. + ``subprotocols`` may be a list of strings specifying proposed + subprotocols. The selected protocol may be found on the + ``selected_subprotocol`` attribute of the connection object + when the connection is complete. + .. versionchanged:: 3.2 Also accepts ``HTTPRequest`` objects in place of urls. .. versionchanged:: 4.1 Added ``compression_options`` and ``on_message_callback``. - The ``io_loop`` argument is deprecated. .. versionchanged:: 4.5 Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size`` arguments, which have the same meaning as in `WebSocketHandler`. + + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + + .. versionchanged:: 5.1 + Added the ``subprotocols`` argument. 
""" - if io_loop is None: - io_loop = IOLoop.current() if isinstance(url, httpclient.HTTPRequest): assert connect_timeout is None request = url @@ -1225,12 +1330,13 @@ def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None, request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout) request = httpclient._RequestProxy( request, httpclient.HTTPRequest._DEFAULTS) - conn = WebSocketClientConnection(io_loop, request, + conn = WebSocketClientConnection(request, on_message_callback=on_message_callback, compression_options=compression_options, ping_interval=ping_interval, ping_timeout=ping_timeout, - max_message_size=max_message_size) + max_message_size=max_message_size, + subprotocols=subprotocols) if callback is not None: - io_loop.add_future(conn.connect_future, callback) + IOLoop.current().add_future(conn.connect_future, callback) return conn.connect_future diff --git a/server/www/packages/packages-common/tornado/wsgi.py b/server/www/packages/packages-common/tornado/wsgi.py index 68a7615..e1230da 100644 --- a/server/www/packages/packages-common/tornado/wsgi.py +++ b/server/www/packages/packages-common/tornado/wsgi.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2009 Facebook # @@ -34,6 +33,7 @@ from __future__ import absolute_import, division, print_function import sys from io import BytesIO import tornado +import warnings from tornado.concurrent import Future from tornado import escape @@ -77,6 +77,7 @@ class WSGIApplication(web.Application): .. deprecated:: 4.0 Use a regular `.Application` and wrap it in `WSGIAdapter` instead. + This class will be removed in Tornado 6.0. """ def __call__(self, environ, start_response): return WSGIAdapter(self)(environ, start_response) @@ -84,8 +85,10 @@ class WSGIApplication(web.Application): # WSGI has no facilities for flow control, so just return an already-done # Future when the interface requires it. -_dummy_future = Future() -_dummy_future.set_result(None) +def _dummy_future(): + f = Future() + f.set_result(None) + return f class _WSGIConnection(httputil.HTTPConnection): @@ -117,7 +120,7 @@ class _WSGIConnection(httputil.HTTPConnection): self.write(chunk, callback) elif callback is not None: callback() - return _dummy_future + return _dummy_future() def write(self, chunk, callback=None): if self._expected_content_remaining is not None: @@ -129,7 +132,7 @@ class _WSGIConnection(httputil.HTTPConnection): self._write_buffer.append(chunk) if callback is not None: callback() - return _dummy_future + return _dummy_future() def finish(self): if (self._expected_content_remaining is not None and @@ -180,9 +183,25 @@ class WSGIAdapter(object): that it is not possible to use `.AsyncHTTPClient`, or the `tornado.auth` or `tornado.websocket` modules. + In multithreaded WSGI servers on Python 3, it may be necessary to + permit `asyncio` to create event loops on any thread. Run the + following at startup (typically import time for WSGI + applications):: + + import asyncio + from tornado.platform.asyncio import AnyThreadEventLoopPolicy + asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) + .. versionadded:: 4.0 + + .. deprecated:: 5.1 + + This class is deprecated and will be removed in Tornado 6.0. + Use Tornado's `.HTTPServer` instead of a WSGI container. 
""" def __init__(self, application): + warnings.warn("WSGIAdapter is deprecated, use Tornado's HTTPServer instead", + DeprecationWarning) if isinstance(application, WSGIApplication): self.application = lambda request: web.Application.__call__( application, request) diff --git a/server/www/teleport/webroot/app/base/webapp.py b/server/www/teleport/webroot/app/base/webapp.py index 05f8186..654715d 100644 --- a/server/www/teleport/webroot/app/base/webapp.py +++ b/server/www/teleport/webroot/app/base/webapp.py @@ -5,12 +5,14 @@ import os import time import urllib.parse import urllib.request +import asyncio import tornado.httpserver import tornado.ioloop import tornado.netutil import tornado.process import tornado.web +import tornado.platform.asyncio from app.const import * from app.base.configs import tp_cfg from app.base.db import get_db @@ -30,6 +32,8 @@ class WebApp: def init(self, path_app_root, path_data): log.initialize() + asyncio.set_event_loop_policy(tornado.platform.asyncio.AnyThreadEventLoopPolicy()) + cfg = tp_cfg() cfg.app_path = path_app_root cfg.static_path = os.path.join(path_app_root, 'static')