diff --git a/dist/client/windows/assist/installer.nsi b/dist/client/windows/assist/installer.nsi index a4cb5cf..cb017b6 100644 Binary files a/dist/client/windows/assist/installer.nsi and b/dist/client/windows/assist/installer.nsi differ diff --git a/server/www/packages/packages-windows/x86/cryptography/__about__.py b/server/www/packages/packages-windows/x86/cryptography/__about__.py new file mode 100644 index 0000000..a99f58f --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/__about__.py @@ -0,0 +1,23 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +__all__ = [ + "__title__", "__summary__", "__uri__", "__version__", "__author__", + "__email__", "__license__", "__copyright__", +] + +__title__ = "cryptography" +__summary__ = ("cryptography is a package which provides cryptographic recipes" + " and primitives to Python developers.") +__uri__ = "https://github.com/pyca/cryptography" + +__version__ = "2.3.1" + +__author__ = "The cryptography developers" +__email__ = "cryptography-dev@python.org" + +__license__ = "BSD or Apache License, Version 2.0" +__copyright__ = "Copyright 2013-2017 {0}".format(__author__) diff --git a/server/www/packages/packages-windows/x86/cryptography/__init__.py b/server/www/packages/packages-windows/x86/cryptography/__init__.py new file mode 100644 index 0000000..6da0b38 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/__init__.py @@ -0,0 +1,16 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography.__about__ import ( + __author__, __copyright__, __email__, __license__, __summary__, __title__, + __uri__, __version__ +) + + +__all__ = [ + "__title__", "__summary__", "__uri__", "__version__", "__author__", + "__email__", "__license__", "__copyright__", +] diff --git a/server/www/packages/packages-windows/x86/cryptography/exceptions.py b/server/www/packages/packages-windows/x86/cryptography/exceptions.py new file mode 100644 index 0000000..648cf9d --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/exceptions.py @@ -0,0 +1,57 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
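+#
+# This module defines the exceptions raised throughout the package. The
+# _Reasons enum below records why an UnsupportedAlgorithm error was
+# raised (unsupported hash, cipher, padding, curve, and so on) so that
+# callers can distinguish the cases without matching on message strings.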
+ +from __future__ import absolute_import, division, print_function + +from enum import Enum + + +class _Reasons(Enum): + BACKEND_MISSING_INTERFACE = 0 + UNSUPPORTED_HASH = 1 + UNSUPPORTED_CIPHER = 2 + UNSUPPORTED_PADDING = 3 + UNSUPPORTED_MGF = 4 + UNSUPPORTED_PUBLIC_KEY_ALGORITHM = 5 + UNSUPPORTED_ELLIPTIC_CURVE = 6 + UNSUPPORTED_SERIALIZATION = 7 + UNSUPPORTED_X509 = 8 + UNSUPPORTED_EXCHANGE_ALGORITHM = 9 + UNSUPPORTED_DIFFIE_HELLMAN = 10 + + +class UnsupportedAlgorithm(Exception): + def __init__(self, message, reason=None): + super(UnsupportedAlgorithm, self).__init__(message) + self._reason = reason + + +class AlreadyFinalized(Exception): + pass + + +class AlreadyUpdated(Exception): + pass + + +class NotYetFinalized(Exception): + pass + + +class InvalidTag(Exception): + pass + + +class InvalidSignature(Exception): + pass + + +class InternalError(Exception): + def __init__(self, msg, err_code): + super(InternalError, self).__init__(msg) + self.err_code = err_code + + +class InvalidKey(Exception): + pass diff --git a/server/www/packages/packages-windows/x86/cryptography/fernet.py b/server/www/packages/packages-windows/x86/cryptography/fernet.py new file mode 100644 index 0000000..ac2dd0b --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/fernet.py @@ -0,0 +1,173 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import base64 +import binascii +import os +import struct +import time + +import six + +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import hashes, padding +from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes +from cryptography.hazmat.primitives.hmac import HMAC + + +class InvalidToken(Exception): + pass + + +_MAX_CLOCK_SKEW = 60 + + +class Fernet(object): + def __init__(self, key, backend=None): + if backend is None: + backend = default_backend() + + key = base64.urlsafe_b64decode(key) + if len(key) != 32: + raise ValueError( + "Fernet key must be 32 url-safe base64-encoded bytes." 
+ ) + + self._signing_key = key[:16] + self._encryption_key = key[16:] + self._backend = backend + + @classmethod + def generate_key(cls): + return base64.urlsafe_b64encode(os.urandom(32)) + + def encrypt(self, data): + current_time = int(time.time()) + iv = os.urandom(16) + return self._encrypt_from_parts(data, current_time, iv) + + def _encrypt_from_parts(self, data, current_time, iv): + if not isinstance(data, bytes): + raise TypeError("data must be bytes.") + + padder = padding.PKCS7(algorithms.AES.block_size).padder() + padded_data = padder.update(data) + padder.finalize() + encryptor = Cipher( + algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend + ).encryptor() + ciphertext = encryptor.update(padded_data) + encryptor.finalize() + + basic_parts = ( + b"\x80" + struct.pack(">Q", current_time) + iv + ciphertext + ) + + h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend) + h.update(basic_parts) + hmac = h.finalize() + return base64.urlsafe_b64encode(basic_parts + hmac) + + def decrypt(self, token, ttl=None): + timestamp, data = Fernet._get_unverified_token_data(token) + return self._decrypt_data(data, timestamp, ttl) + + def extract_timestamp(self, token): + timestamp, data = Fernet._get_unverified_token_data(token) + # Verify the token was not tampered with. + self._verify_signature(data) + return timestamp + + @staticmethod + def _get_unverified_token_data(token): + if not isinstance(token, bytes): + raise TypeError("token must be bytes.") + + try: + data = base64.urlsafe_b64decode(token) + except (TypeError, binascii.Error): + raise InvalidToken + + if not data or six.indexbytes(data, 0) != 0x80: + raise InvalidToken + + try: + timestamp, = struct.unpack(">Q", data[1:9]) + except struct.error: + raise InvalidToken + return timestamp, data + + def _verify_signature(self, data): + h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend) + h.update(data[:-32]) + try: + h.verify(data[-32:]) + except InvalidSignature: + raise InvalidToken + + def _decrypt_data(self, data, timestamp, ttl): + current_time = int(time.time()) + if ttl is not None: + if timestamp + ttl < current_time: + raise InvalidToken + + if current_time + _MAX_CLOCK_SKEW < timestamp: + raise InvalidToken + + self._verify_signature(data) + + iv = data[9:25] + ciphertext = data[25:-32] + decryptor = Cipher( + algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend + ).decryptor() + plaintext_padded = decryptor.update(ciphertext) + try: + plaintext_padded += decryptor.finalize() + except ValueError: + raise InvalidToken + unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder() + + unpadded = unpadder.update(plaintext_padded) + try: + unpadded += unpadder.finalize() + except ValueError: + raise InvalidToken + return unpadded + + +class MultiFernet(object): + def __init__(self, fernets): + fernets = list(fernets) + if not fernets: + raise ValueError( + "MultiFernet requires at least one Fernet instance" + ) + self._fernets = fernets + + def encrypt(self, msg): + return self._fernets[0].encrypt(msg) + + def rotate(self, msg): + timestamp, data = Fernet._get_unverified_token_data(msg) + for f in self._fernets: + try: + p = f._decrypt_data(data, timestamp, None) + break + except InvalidToken: + pass + else: + raise InvalidToken + + iv = os.urandom(16) + return self._fernets[0]._encrypt_from_parts(p, timestamp, iv) + + def decrypt(self, msg, ttl=None): + for f in self._fernets: + try: + return f.decrypt(msg, ttl) + except InvalidToken: + pass + raise InvalidToken diff 
--git a/server/www/packages/packages-windows/x86/cryptography/hazmat/__init__.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/__init__.py new file mode 100644 index 0000000..9f06a99 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/__init__.py @@ -0,0 +1,11 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +Hazardous Materials + +This is a "Hazardous Materials" module. You should ONLY use it if you're +100% absolutely sure that you know what you're doing because this module +is full of land mines, dragons, and dinosaurs with laser guns. +""" +from __future__ import absolute_import, division, print_function diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/__init__.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/__init__.py new file mode 100644 index 0000000..565bde7 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/__init__.py @@ -0,0 +1,18 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + + +_default_backend = None + + +def default_backend(): + global _default_backend + + if _default_backend is None: + from cryptography.hazmat.backends.openssl.backend import backend + _default_backend = backend + + return _default_backend diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/interfaces.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/interfaces.py new file mode 100644 index 0000000..0a476b9 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/interfaces.py @@ -0,0 +1,395 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class CipherBackend(object): + @abc.abstractmethod + def cipher_supported(self, cipher, mode): + """ + Return True if the given cipher and mode are supported. + """ + + @abc.abstractmethod + def create_symmetric_encryption_ctx(self, cipher, mode): + """ + Get a CipherContext that can be used for encryption. + """ + + @abc.abstractmethod + def create_symmetric_decryption_ctx(self, cipher, mode): + """ + Get a CipherContext that can be used for decryption. + """ + + +@six.add_metaclass(abc.ABCMeta) +class HashBackend(object): + @abc.abstractmethod + def hash_supported(self, algorithm): + """ + Return True if the hash algorithm is supported by this backend. + """ + + @abc.abstractmethod + def create_hash_ctx(self, algorithm): + """ + Create a HashContext for calculating a message digest. + """ + + +@six.add_metaclass(abc.ABCMeta) +class HMACBackend(object): + @abc.abstractmethod + def hmac_supported(self, algorithm): + """ + Return True if the hash algorithm is supported for HMAC by this + backend. + """ + + @abc.abstractmethod + def create_hmac_ctx(self, key, algorithm): + """ + Create a MACContext for calculating a message authentication code. 
+ """ + + +@six.add_metaclass(abc.ABCMeta) +class CMACBackend(object): + @abc.abstractmethod + def cmac_algorithm_supported(self, algorithm): + """ + Returns True if the block cipher is supported for CMAC by this backend + """ + + @abc.abstractmethod + def create_cmac_ctx(self, algorithm): + """ + Create a MACContext for calculating a message authentication code. + """ + + +@six.add_metaclass(abc.ABCMeta) +class PBKDF2HMACBackend(object): + @abc.abstractmethod + def pbkdf2_hmac_supported(self, algorithm): + """ + Return True if the hash algorithm is supported for PBKDF2 by this + backend. + """ + + @abc.abstractmethod + def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations, + key_material): + """ + Return length bytes derived from provided PBKDF2 parameters. + """ + + +@six.add_metaclass(abc.ABCMeta) +class RSABackend(object): + @abc.abstractmethod + def generate_rsa_private_key(self, public_exponent, key_size): + """ + Generate an RSAPrivateKey instance with public_exponent and a modulus + of key_size bits. + """ + + @abc.abstractmethod + def rsa_padding_supported(self, padding): + """ + Returns True if the backend supports the given padding options. + """ + + @abc.abstractmethod + def generate_rsa_parameters_supported(self, public_exponent, key_size): + """ + Returns True if the backend supports the given parameters for key + generation. + """ + + @abc.abstractmethod + def load_rsa_private_numbers(self, numbers): + """ + Returns an RSAPrivateKey provider. + """ + + @abc.abstractmethod + def load_rsa_public_numbers(self, numbers): + """ + Returns an RSAPublicKey provider. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DSABackend(object): + @abc.abstractmethod + def generate_dsa_parameters(self, key_size): + """ + Generate a DSAParameters instance with a modulus of key_size bits. + """ + + @abc.abstractmethod + def generate_dsa_private_key(self, parameters): + """ + Generate a DSAPrivateKey instance with parameters as a DSAParameters + object. + """ + + @abc.abstractmethod + def generate_dsa_private_key_and_parameters(self, key_size): + """ + Generate a DSAPrivateKey instance using key size only. + """ + + @abc.abstractmethod + def dsa_hash_supported(self, algorithm): + """ + Return True if the hash algorithm is supported by the backend for DSA. + """ + + @abc.abstractmethod + def dsa_parameters_supported(self, p, q, g): + """ + Return True if the parameters are supported by the backend for DSA. + """ + + @abc.abstractmethod + def load_dsa_private_numbers(self, numbers): + """ + Returns a DSAPrivateKey provider. + """ + + @abc.abstractmethod + def load_dsa_public_numbers(self, numbers): + """ + Returns a DSAPublicKey provider. + """ + + @abc.abstractmethod + def load_dsa_parameter_numbers(self, numbers): + """ + Returns a DSAParameters provider. + """ + + +@six.add_metaclass(abc.ABCMeta) +class EllipticCurveBackend(object): + @abc.abstractmethod + def elliptic_curve_signature_algorithm_supported( + self, signature_algorithm, curve + ): + """ + Returns True if the backend supports the named elliptic curve with the + specified signature algorithm. + """ + + @abc.abstractmethod + def elliptic_curve_supported(self, curve): + """ + Returns True if the backend supports the named elliptic curve. + """ + + @abc.abstractmethod + def generate_elliptic_curve_private_key(self, curve): + """ + Return an object conforming to the EllipticCurvePrivateKey interface. 
+        """
+
+    @abc.abstractmethod
+    def load_elliptic_curve_public_numbers(self, numbers):
+        """
+        Return an EllipticCurvePublicKey provider using the given numbers.
+        """
+
+    @abc.abstractmethod
+    def load_elliptic_curve_private_numbers(self, numbers):
+        """
+        Return an EllipticCurvePrivateKey provider using the given numbers.
+        """
+
+    @abc.abstractmethod
+    def elliptic_curve_exchange_algorithm_supported(self, algorithm, curve):
+        """
+        Returns whether the exchange algorithm is supported by this backend.
+        """
+
+    @abc.abstractmethod
+    def derive_elliptic_curve_private_key(self, private_value, curve):
+        """
+        Compute the private key given the private value and curve.
+        """
+
+
+@six.add_metaclass(abc.ABCMeta)
+class PEMSerializationBackend(object):
+    @abc.abstractmethod
+    def load_pem_private_key(self, data, password):
+        """
+        Loads a private key from PEM encoded data, using the provided password
+        if the data is encrypted.
+        """
+
+    @abc.abstractmethod
+    def load_pem_public_key(self, data):
+        """
+        Loads a public key from PEM encoded data.
+        """
+
+    @abc.abstractmethod
+    def load_pem_parameters(self, data):
+        """
+        Load encryption parameters from PEM encoded data.
+        """
+
+
+@six.add_metaclass(abc.ABCMeta)
+class DERSerializationBackend(object):
+    @abc.abstractmethod
+    def load_der_private_key(self, data, password):
+        """
+        Loads a private key from DER encoded data. Uses the provided password
+        if the data is encrypted.
+        """
+
+    @abc.abstractmethod
+    def load_der_public_key(self, data):
+        """
+        Loads a public key from DER encoded data.
+        """
+
+    @abc.abstractmethod
+    def load_der_parameters(self, data):
+        """
+        Load encryption parameters from DER encoded data.
+        """
+
+
+@six.add_metaclass(abc.ABCMeta)
+class X509Backend(object):
+    @abc.abstractmethod
+    def load_pem_x509_certificate(self, data):
+        """
+        Load an X.509 certificate from PEM encoded data.
+        """
+
+    @abc.abstractmethod
+    def load_der_x509_certificate(self, data):
+        """
+        Load an X.509 certificate from DER encoded data.
+        """
+
+    @abc.abstractmethod
+    def load_der_x509_csr(self, data):
+        """
+        Load an X.509 CSR from DER encoded data.
+        """
+
+    @abc.abstractmethod
+    def load_pem_x509_csr(self, data):
+        """
+        Load an X.509 CSR from PEM encoded data.
+        """
+
+    @abc.abstractmethod
+    def create_x509_csr(self, builder, private_key, algorithm):
+        """
+        Create and sign an X.509 CSR from a CSR builder object.
+        """
+
+    @abc.abstractmethod
+    def create_x509_certificate(self, builder, private_key, algorithm):
+        """
+        Create and sign an X.509 certificate from a CertificateBuilder object.
+        """
+
+    @abc.abstractmethod
+    def create_x509_crl(self, builder, private_key, algorithm):
+        """
+        Create and sign an X.509 CertificateRevocationList from a
+        CertificateRevocationListBuilder object.
+        """
+
+    @abc.abstractmethod
+    def create_x509_revoked_certificate(self, builder):
+        """
+        Create a RevokedCertificate object from a RevokedCertificateBuilder
+        object.
+        """
+
+    @abc.abstractmethod
+    def x509_name_bytes(self, name):
+        """
+        Compute the DER encoded bytes of an X509 Name object.
+        """
+
+
+@six.add_metaclass(abc.ABCMeta)
+class DHBackend(object):
+    @abc.abstractmethod
+    def generate_dh_parameters(self, generator, key_size):
+        """
+        Generate a DHParameters instance with a modulus of key_size bits,
+        using the given generator (often 2 or 5).
+        """
+
+    @abc.abstractmethod
+    def generate_dh_private_key(self, parameters):
+        """
+        Generate a DHPrivateKey instance with parameters as a DHParameters
+        object.
+        """
+
+    @abc.abstractmethod
+    def generate_dh_private_key_and_parameters(self, generator, key_size):
+        """
+        Generate a DHPrivateKey instance from only a key size, using the
+        given generator (often 2 or 5).
+        """
+
+    @abc.abstractmethod
+    def load_dh_private_numbers(self, numbers):
+        """
+        Load a DHPrivateKey from DHPrivateNumbers.
+        """
+
+    @abc.abstractmethod
+    def load_dh_public_numbers(self, numbers):
+        """
+        Load a DHPublicKey from DHPublicNumbers.
+        """
+
+    @abc.abstractmethod
+    def load_dh_parameter_numbers(self, numbers):
+        """
+        Load DHParameters from DHParameterNumbers.
+        """
+
+    @abc.abstractmethod
+    def dh_parameters_supported(self, p, g, q=None):
+        """
+        Returns whether the backend supports DH with these parameter values.
+        """
+
+    @abc.abstractmethod
+    def dh_x942_serialization_supported(self):
+        """
+        Returns True if the backend supports the serialization of DH objects
+        with subgroup order (q).
+        """
+
+
+@six.add_metaclass(abc.ABCMeta)
+class ScryptBackend(object):
+    @abc.abstractmethod
+    def derive_scrypt(self, key_material, salt, length, n, r, p):
+        """
+        Return bytes derived from provided Scrypt parameters.
+        """
diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/__init__.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/__init__.py
new file mode 100644
index 0000000..8eadeb6
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/__init__.py
@@ -0,0 +1,10 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+from cryptography.hazmat.backends.openssl.backend import backend
+
+
+__all__ = ["backend"]
diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/aead.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/aead.py
new file mode 100644
index 0000000..9cec3e2
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/aead.py
@@ -0,0 +1,159 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
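+#
+# Internal helpers that drive OpenSSL's EVP_CIPHER interface for the AEAD
+# constructions (AES-GCM, AES-CCM and ChaCha20Poly1305). _encrypt appends
+# the authentication tag to the returned ciphertext; _decrypt splits the
+# tag back off and raises InvalidTag if verification fails. These helpers
+# back the public classes in cryptography.hazmat.primitives.ciphers.aead,
+# e.g.:
+#
+#     key = AESGCM.generate_key(bit_length=128)
+#     nonce = os.urandom(12)
+#     ct = AESGCM(key).encrypt(nonce, data, associated_data)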
+ +from __future__ import absolute_import, division, print_function + +from cryptography.exceptions import InvalidTag + + +_ENCRYPT = 1 +_DECRYPT = 0 + + +def _aead_cipher_name(cipher): + from cryptography.hazmat.primitives.ciphers.aead import ( + AESCCM, AESGCM, ChaCha20Poly1305 + ) + if isinstance(cipher, ChaCha20Poly1305): + return b"chacha20-poly1305" + elif isinstance(cipher, AESCCM): + return "aes-{0}-ccm".format(len(cipher._key) * 8).encode("ascii") + else: + assert isinstance(cipher, AESGCM) + return "aes-{0}-gcm".format(len(cipher._key) * 8).encode("ascii") + + +def _aead_setup(backend, cipher_name, key, nonce, tag, tag_len, operation): + evp_cipher = backend._lib.EVP_get_cipherbyname(cipher_name) + backend.openssl_assert(evp_cipher != backend._ffi.NULL) + ctx = backend._lib.EVP_CIPHER_CTX_new() + ctx = backend._ffi.gc(ctx, backend._lib.EVP_CIPHER_CTX_free) + res = backend._lib.EVP_CipherInit_ex( + ctx, evp_cipher, + backend._ffi.NULL, + backend._ffi.NULL, + backend._ffi.NULL, + int(operation == _ENCRYPT) + ) + backend.openssl_assert(res != 0) + res = backend._lib.EVP_CIPHER_CTX_set_key_length(ctx, len(key)) + backend.openssl_assert(res != 0) + res = backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, backend._lib.EVP_CTRL_AEAD_SET_IVLEN, len(nonce), + backend._ffi.NULL + ) + backend.openssl_assert(res != 0) + if operation == _DECRYPT: + res = backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, backend._lib.EVP_CTRL_AEAD_SET_TAG, len(tag), tag + ) + backend.openssl_assert(res != 0) + else: + res = backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, backend._lib.EVP_CTRL_AEAD_SET_TAG, tag_len, backend._ffi.NULL + ) + + res = backend._lib.EVP_CipherInit_ex( + ctx, + backend._ffi.NULL, + backend._ffi.NULL, + key, + nonce, + int(operation == _ENCRYPT) + ) + backend.openssl_assert(res != 0) + return ctx + + +def _set_length(backend, ctx, data_len): + intptr = backend._ffi.new("int *") + res = backend._lib.EVP_CipherUpdate( + ctx, + backend._ffi.NULL, + intptr, + backend._ffi.NULL, + data_len + ) + backend.openssl_assert(res != 0) + + +def _process_aad(backend, ctx, associated_data): + outlen = backend._ffi.new("int *") + res = backend._lib.EVP_CipherUpdate( + ctx, backend._ffi.NULL, outlen, associated_data, len(associated_data) + ) + backend.openssl_assert(res != 0) + + +def _process_data(backend, ctx, data): + outlen = backend._ffi.new("int *") + buf = backend._ffi.new("unsigned char[]", len(data)) + res = backend._lib.EVP_CipherUpdate(ctx, buf, outlen, data, len(data)) + backend.openssl_assert(res != 0) + return backend._ffi.buffer(buf, outlen[0])[:] + + +def _encrypt(backend, cipher, nonce, data, associated_data, tag_length): + from cryptography.hazmat.primitives.ciphers.aead import AESCCM + cipher_name = _aead_cipher_name(cipher) + ctx = _aead_setup( + backend, cipher_name, cipher._key, nonce, None, tag_length, _ENCRYPT + ) + # CCM requires us to pass the length of the data before processing anything + # However calling this with any other AEAD results in an error + if isinstance(cipher, AESCCM): + _set_length(backend, ctx, len(data)) + + _process_aad(backend, ctx, associated_data) + processed_data = _process_data(backend, ctx, data) + outlen = backend._ffi.new("int *") + res = backend._lib.EVP_CipherFinal_ex(ctx, backend._ffi.NULL, outlen) + backend.openssl_assert(res != 0) + backend.openssl_assert(outlen[0] == 0) + tag_buf = backend._ffi.new("unsigned char[]", tag_length) + res = backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, backend._lib.EVP_CTRL_AEAD_GET_TAG, tag_length, tag_buf + ) + backend.openssl_assert(res != 
0) + tag = backend._ffi.buffer(tag_buf)[:] + + return processed_data + tag + + +def _decrypt(backend, cipher, nonce, data, associated_data, tag_length): + from cryptography.hazmat.primitives.ciphers.aead import AESCCM + if len(data) < tag_length: + raise InvalidTag + tag = data[-tag_length:] + data = data[:-tag_length] + cipher_name = _aead_cipher_name(cipher) + ctx = _aead_setup( + backend, cipher_name, cipher._key, nonce, tag, tag_length, _DECRYPT + ) + # CCM requires us to pass the length of the data before processing anything + # However calling this with any other AEAD results in an error + if isinstance(cipher, AESCCM): + _set_length(backend, ctx, len(data)) + + _process_aad(backend, ctx, associated_data) + # CCM has a different error path if the tag doesn't match. Errors are + # raised in Update and Final is irrelevant. + if isinstance(cipher, AESCCM): + outlen = backend._ffi.new("int *") + buf = backend._ffi.new("unsigned char[]", len(data)) + res = backend._lib.EVP_CipherUpdate(ctx, buf, outlen, data, len(data)) + if res != 1: + backend._consume_errors() + raise InvalidTag + + processed_data = backend._ffi.buffer(buf, outlen[0])[:] + else: + processed_data = _process_data(backend, ctx, data) + outlen = backend._ffi.new("int *") + res = backend._lib.EVP_CipherFinal_ex(ctx, backend._ffi.NULL, outlen) + if res == 0: + backend._consume_errors() + raise InvalidTag + + return processed_data diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/backend.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/backend.py new file mode 100644 index 0000000..af14bfa --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/backend.py @@ -0,0 +1,1974 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
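+#
+# Concrete OpenSSL implementation of the abstract backend interfaces in
+# cryptography.hazmat.backends.interfaces (CipherBackend, HashBackend,
+# HMACBackend, RSABackend, X509Backend, and so on). A single shared
+# instance is exported as `backend` and is what
+# cryptography.hazmat.backends.default_backend() returns.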
+ +from __future__ import absolute_import, division, print_function + +import base64 +import calendar +import collections +import contextlib +import itertools +from contextlib import contextmanager + +import six + +from cryptography import utils, x509 +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.backends.interfaces import ( + CMACBackend, CipherBackend, DERSerializationBackend, DHBackend, DSABackend, + EllipticCurveBackend, HMACBackend, HashBackend, PBKDF2HMACBackend, + PEMSerializationBackend, RSABackend, ScryptBackend, X509Backend +) +from cryptography.hazmat.backends.openssl import aead +from cryptography.hazmat.backends.openssl.ciphers import _CipherContext +from cryptography.hazmat.backends.openssl.cmac import _CMACContext +from cryptography.hazmat.backends.openssl.decode_asn1 import _Integers +from cryptography.hazmat.backends.openssl.dh import ( + _DHParameters, _DHPrivateKey, _DHPublicKey, _dh_params_dup +) +from cryptography.hazmat.backends.openssl.dsa import ( + _DSAParameters, _DSAPrivateKey, _DSAPublicKey +) +from cryptography.hazmat.backends.openssl.ec import ( + _EllipticCurvePrivateKey, _EllipticCurvePublicKey +) +from cryptography.hazmat.backends.openssl.encode_asn1 import ( + _CRL_ENTRY_EXTENSION_ENCODE_HANDLERS, + _CRL_EXTENSION_ENCODE_HANDLERS, _EXTENSION_ENCODE_HANDLERS, + _encode_asn1_int_gc, _encode_asn1_str_gc, _encode_name_gc, _txt2obj_gc, +) +from cryptography.hazmat.backends.openssl.hashes import _HashContext +from cryptography.hazmat.backends.openssl.hmac import _HMACContext +from cryptography.hazmat.backends.openssl.rsa import ( + _RSAPrivateKey, _RSAPublicKey +) +from cryptography.hazmat.backends.openssl.x25519 import ( + _X25519PrivateKey, _X25519PublicKey +) +from cryptography.hazmat.backends.openssl.x509 import ( + _Certificate, _CertificateRevocationList, + _CertificateSigningRequest, _RevokedCertificate +) +from cryptography.hazmat.bindings.openssl import binding +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa +from cryptography.hazmat.primitives.asymmetric.padding import ( + MGF1, OAEP, PKCS1v15, PSS +) +from cryptography.hazmat.primitives.ciphers.algorithms import ( + AES, ARC4, Blowfish, CAST5, Camellia, ChaCha20, IDEA, SEED, TripleDES +) +from cryptography.hazmat.primitives.ciphers.modes import ( + CBC, CFB, CFB8, CTR, ECB, GCM, OFB, XTS +) +from cryptography.hazmat.primitives.kdf import scrypt + + +_MemoryBIO = collections.namedtuple("_MemoryBIO", ["bio", "char_ptr"]) + + +@utils.register_interface(CipherBackend) +@utils.register_interface(CMACBackend) +@utils.register_interface(DERSerializationBackend) +@utils.register_interface(DHBackend) +@utils.register_interface(DSABackend) +@utils.register_interface(EllipticCurveBackend) +@utils.register_interface(HashBackend) +@utils.register_interface(HMACBackend) +@utils.register_interface(PBKDF2HMACBackend) +@utils.register_interface(RSABackend) +@utils.register_interface(PEMSerializationBackend) +@utils.register_interface(X509Backend) +@utils.register_interface_if( + binding.Binding().lib.Cryptography_HAS_SCRYPT, ScryptBackend +) +class Backend(object): + """ + OpenSSL API binding interfaces. 
+ """ + name = "openssl" + + def __init__(self): + self._binding = binding.Binding() + self._ffi = self._binding.ffi + self._lib = self._binding.lib + + self._cipher_registry = {} + self._register_default_ciphers() + self.activate_osrandom_engine() + self._dh_types = [self._lib.EVP_PKEY_DH] + if self._lib.Cryptography_HAS_EVP_PKEY_DHX: + self._dh_types.append(self._lib.EVP_PKEY_DHX) + + def openssl_assert(self, ok): + return binding._openssl_assert(self._lib, ok) + + def activate_builtin_random(self): + # Obtain a new structural reference. + e = self._lib.ENGINE_get_default_RAND() + if e != self._ffi.NULL: + self._lib.ENGINE_unregister_RAND(e) + # Reset the RNG to use the new engine. + self._lib.RAND_cleanup() + # decrement the structural reference from get_default_RAND + res = self._lib.ENGINE_finish(e) + self.openssl_assert(res == 1) + + @contextlib.contextmanager + def _get_osurandom_engine(self): + # Fetches an engine by id and returns it. This creates a structural + # reference. + e = self._lib.ENGINE_by_id(self._binding._osrandom_engine_id) + self.openssl_assert(e != self._ffi.NULL) + # Initialize the engine for use. This adds a functional reference. + res = self._lib.ENGINE_init(e) + self.openssl_assert(res == 1) + + try: + yield e + finally: + # Decrement the structural ref incremented by ENGINE_by_id. + res = self._lib.ENGINE_free(e) + self.openssl_assert(res == 1) + # Decrement the functional ref incremented by ENGINE_init. + res = self._lib.ENGINE_finish(e) + self.openssl_assert(res == 1) + + def activate_osrandom_engine(self): + # Unregister and free the current engine. + self.activate_builtin_random() + with self._get_osurandom_engine() as e: + # Set the engine as the default RAND provider. + res = self._lib.ENGINE_set_default_RAND(e) + self.openssl_assert(res == 1) + # Reset the RNG to use the new engine. + self._lib.RAND_cleanup() + + def osrandom_engine_implementation(self): + buf = self._ffi.new("char[]", 64) + with self._get_osurandom_engine() as e: + res = self._lib.ENGINE_ctrl_cmd(e, b"get_implementation", + len(buf), buf, + self._ffi.NULL, 0) + self.openssl_assert(res > 0) + return self._ffi.string(buf).decode('ascii') + + def openssl_version_text(self): + """ + Friendly string name of the loaded OpenSSL library. This is not + necessarily the same version as it was compiled against. 
+ + Example: OpenSSL 1.0.1e 11 Feb 2013 + """ + return self._ffi.string( + self._lib.OpenSSL_version(self._lib.OPENSSL_VERSION) + ).decode("ascii") + + def openssl_version_number(self): + return self._lib.OpenSSL_version_num() + + def create_hmac_ctx(self, key, algorithm): + return _HMACContext(self, key, algorithm) + + def _build_openssl_digest_name(self, algorithm): + if algorithm.name == "blake2b" or algorithm.name == "blake2s": + alg = "{0}{1}".format( + algorithm.name, algorithm.digest_size * 8 + ).encode("ascii") + else: + alg = algorithm.name.encode("ascii") + + return alg + + def hash_supported(self, algorithm): + name = self._build_openssl_digest_name(algorithm) + digest = self._lib.EVP_get_digestbyname(name) + return digest != self._ffi.NULL + + def hmac_supported(self, algorithm): + return self.hash_supported(algorithm) + + def create_hash_ctx(self, algorithm): + return _HashContext(self, algorithm) + + def cipher_supported(self, cipher, mode): + try: + adapter = self._cipher_registry[type(cipher), type(mode)] + except KeyError: + return False + evp_cipher = adapter(self, cipher, mode) + return self._ffi.NULL != evp_cipher + + def register_cipher_adapter(self, cipher_cls, mode_cls, adapter): + if (cipher_cls, mode_cls) in self._cipher_registry: + raise ValueError("Duplicate registration for: {0} {1}.".format( + cipher_cls, mode_cls) + ) + self._cipher_registry[cipher_cls, mode_cls] = adapter + + def _register_default_ciphers(self): + for mode_cls in [CBC, CTR, ECB, OFB, CFB, CFB8, GCM]: + self.register_cipher_adapter( + AES, + mode_cls, + GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}") + ) + for mode_cls in [CBC, CTR, ECB, OFB, CFB]: + self.register_cipher_adapter( + Camellia, + mode_cls, + GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}") + ) + for mode_cls in [CBC, CFB, CFB8, OFB]: + self.register_cipher_adapter( + TripleDES, + mode_cls, + GetCipherByName("des-ede3-{mode.name}") + ) + self.register_cipher_adapter( + TripleDES, + ECB, + GetCipherByName("des-ede3") + ) + for mode_cls in [CBC, CFB, OFB, ECB]: + self.register_cipher_adapter( + Blowfish, + mode_cls, + GetCipherByName("bf-{mode.name}") + ) + for mode_cls in [CBC, CFB, OFB, ECB]: + self.register_cipher_adapter( + SEED, + mode_cls, + GetCipherByName("seed-{mode.name}") + ) + for cipher_cls, mode_cls in itertools.product( + [CAST5, IDEA], + [CBC, OFB, CFB, ECB], + ): + self.register_cipher_adapter( + cipher_cls, + mode_cls, + GetCipherByName("{cipher.name}-{mode.name}") + ) + self.register_cipher_adapter( + ARC4, + type(None), + GetCipherByName("rc4") + ) + self.register_cipher_adapter( + ChaCha20, + type(None), + GetCipherByName("chacha20") + ) + self.register_cipher_adapter(AES, XTS, _get_xts_cipher) + + def create_symmetric_encryption_ctx(self, cipher, mode): + return _CipherContext(self, cipher, mode, _CipherContext._ENCRYPT) + + def create_symmetric_decryption_ctx(self, cipher, mode): + return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT) + + def pbkdf2_hmac_supported(self, algorithm): + return self.hmac_supported(algorithm) + + def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations, + key_material): + buf = self._ffi.new("unsigned char[]", length) + evp_md = self._lib.EVP_get_digestbyname( + algorithm.name.encode("ascii")) + self.openssl_assert(evp_md != self._ffi.NULL) + res = self._lib.PKCS5_PBKDF2_HMAC( + key_material, + len(key_material), + salt, + len(salt), + iterations, + evp_md, + length, + buf + ) + self.openssl_assert(res == 1) + return 
self._ffi.buffer(buf)[:] + + def _consume_errors(self): + return binding._consume_errors(self._lib) + + def _bn_to_int(self, bn): + assert bn != self._ffi.NULL + + if not six.PY2: + # Python 3 has constant time from_bytes, so use that. + bn_num_bytes = self._lib.BN_num_bytes(bn) + bin_ptr = self._ffi.new("unsigned char[]", bn_num_bytes) + bin_len = self._lib.BN_bn2bin(bn, bin_ptr) + # A zero length means the BN has value 0 + self.openssl_assert(bin_len >= 0) + return int.from_bytes(self._ffi.buffer(bin_ptr)[:bin_len], "big") + else: + # Under Python 2 the best we can do is hex() + hex_cdata = self._lib.BN_bn2hex(bn) + self.openssl_assert(hex_cdata != self._ffi.NULL) + hex_str = self._ffi.string(hex_cdata) + self._lib.OPENSSL_free(hex_cdata) + return int(hex_str, 16) + + def _int_to_bn(self, num, bn=None): + """ + Converts a python integer to a BIGNUM. The returned BIGNUM will not + be garbage collected (to support adding them to structs that take + ownership of the object). Be sure to register it for GC if it will + be discarded after use. + """ + assert bn is None or bn != self._ffi.NULL + + if bn is None: + bn = self._ffi.NULL + + if not six.PY2: + # Python 3 has constant time to_bytes, so use that. + + binary = num.to_bytes(int(num.bit_length() / 8.0 + 1), "big") + bn_ptr = self._lib.BN_bin2bn(binary, len(binary), bn) + self.openssl_assert(bn_ptr != self._ffi.NULL) + return bn_ptr + + else: + # Under Python 2 the best we can do is hex(), [2:] removes the 0x + # prefix. + hex_num = hex(num).rstrip("L")[2:].encode("ascii") + bn_ptr = self._ffi.new("BIGNUM **") + bn_ptr[0] = bn + res = self._lib.BN_hex2bn(bn_ptr, hex_num) + self.openssl_assert(res != 0) + self.openssl_assert(bn_ptr[0] != self._ffi.NULL) + return bn_ptr[0] + + def generate_rsa_private_key(self, public_exponent, key_size): + rsa._verify_rsa_parameters(public_exponent, key_size) + + rsa_cdata = self._lib.RSA_new() + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + + bn = self._int_to_bn(public_exponent) + bn = self._ffi.gc(bn, self._lib.BN_free) + + res = self._lib.RSA_generate_key_ex( + rsa_cdata, key_size, bn, self._ffi.NULL + ) + self.openssl_assert(res == 1) + evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata) + + return _RSAPrivateKey(self, rsa_cdata, evp_pkey) + + def generate_rsa_parameters_supported(self, public_exponent, key_size): + return (public_exponent >= 3 and public_exponent & 1 != 0 and + key_size >= 512) + + def load_rsa_private_numbers(self, numbers): + rsa._check_private_key_components( + numbers.p, + numbers.q, + numbers.d, + numbers.dmp1, + numbers.dmq1, + numbers.iqmp, + numbers.public_numbers.e, + numbers.public_numbers.n + ) + rsa_cdata = self._lib.RSA_new() + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + p = self._int_to_bn(numbers.p) + q = self._int_to_bn(numbers.q) + d = self._int_to_bn(numbers.d) + dmp1 = self._int_to_bn(numbers.dmp1) + dmq1 = self._int_to_bn(numbers.dmq1) + iqmp = self._int_to_bn(numbers.iqmp) + e = self._int_to_bn(numbers.public_numbers.e) + n = self._int_to_bn(numbers.public_numbers.n) + res = self._lib.RSA_set0_factors(rsa_cdata, p, q) + self.openssl_assert(res == 1) + res = self._lib.RSA_set0_key(rsa_cdata, n, e, d) + self.openssl_assert(res == 1) + res = self._lib.RSA_set0_crt_params(rsa_cdata, dmp1, dmq1, iqmp) + self.openssl_assert(res == 1) + res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL) + self.openssl_assert(res == 1) + evp_pkey = 
self._rsa_cdata_to_evp_pkey(rsa_cdata) + + return _RSAPrivateKey(self, rsa_cdata, evp_pkey) + + def load_rsa_public_numbers(self, numbers): + rsa._check_public_key_components(numbers.e, numbers.n) + rsa_cdata = self._lib.RSA_new() + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + e = self._int_to_bn(numbers.e) + n = self._int_to_bn(numbers.n) + res = self._lib.RSA_set0_key(rsa_cdata, n, e, self._ffi.NULL) + self.openssl_assert(res == 1) + evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata) + + return _RSAPublicKey(self, rsa_cdata, evp_pkey) + + def _create_evp_pkey_gc(self): + evp_pkey = self._lib.EVP_PKEY_new() + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + return evp_pkey + + def _rsa_cdata_to_evp_pkey(self, rsa_cdata): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set1_RSA(evp_pkey, rsa_cdata) + self.openssl_assert(res == 1) + return evp_pkey + + def _bytes_to_bio(self, data): + """ + Return a _MemoryBIO namedtuple of (BIO, char*). + + The char* is the storage for the BIO and it must stay alive until the + BIO is finished with. + """ + data_char_p = self._ffi.new("char[]", data) + bio = self._lib.BIO_new_mem_buf( + data_char_p, len(data) + ) + self.openssl_assert(bio != self._ffi.NULL) + + return _MemoryBIO(self._ffi.gc(bio, self._lib.BIO_free), data_char_p) + + def _create_mem_bio_gc(self): + """ + Creates an empty memory BIO. + """ + bio_method = self._lib.BIO_s_mem() + self.openssl_assert(bio_method != self._ffi.NULL) + bio = self._lib.BIO_new(bio_method) + self.openssl_assert(bio != self._ffi.NULL) + bio = self._ffi.gc(bio, self._lib.BIO_free) + return bio + + def _read_mem_bio(self, bio): + """ + Reads a memory BIO. This only works on memory BIOs. + """ + buf = self._ffi.new("char **") + buf_len = self._lib.BIO_get_mem_data(bio, buf) + self.openssl_assert(buf_len > 0) + self.openssl_assert(buf[0] != self._ffi.NULL) + bio_data = self._ffi.buffer(buf[0], buf_len)[:] + return bio_data + + def _evp_pkey_to_private_key(self, evp_pkey): + """ + Return the appropriate type of PrivateKey given an evp_pkey cdata + pointer. + """ + + key_type = self._lib.EVP_PKEY_id(evp_pkey) + + if key_type == self._lib.EVP_PKEY_RSA: + rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey) + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + return _RSAPrivateKey(self, rsa_cdata, evp_pkey) + elif key_type == self._lib.EVP_PKEY_DSA: + dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey) + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + return _DSAPrivateKey(self, dsa_cdata, evp_pkey) + elif key_type == self._lib.EVP_PKEY_EC: + ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + elif key_type in self._dh_types: + dh_cdata = self._lib.EVP_PKEY_get1_DH(evp_pkey) + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + return _DHPrivateKey(self, dh_cdata, evp_pkey) + else: + raise UnsupportedAlgorithm("Unsupported key type.") + + def _evp_pkey_to_public_key(self, evp_pkey): + """ + Return the appropriate type of PublicKey given an evp_pkey cdata + pointer. 
+ """ + + key_type = self._lib.EVP_PKEY_id(evp_pkey) + + if key_type == self._lib.EVP_PKEY_RSA: + rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey) + self.openssl_assert(rsa_cdata != self._ffi.NULL) + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + return _RSAPublicKey(self, rsa_cdata, evp_pkey) + elif key_type == self._lib.EVP_PKEY_DSA: + dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey) + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + return _DSAPublicKey(self, dsa_cdata, evp_pkey) + elif key_type == self._lib.EVP_PKEY_EC: + ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey) + elif key_type in self._dh_types: + dh_cdata = self._lib.EVP_PKEY_get1_DH(evp_pkey) + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + return _DHPublicKey(self, dh_cdata, evp_pkey) + else: + raise UnsupportedAlgorithm("Unsupported key type.") + + def _oaep_hash_supported(self, algorithm): + if self._lib.Cryptography_HAS_RSA_OAEP_MD: + return isinstance( + algorithm, ( + hashes.SHA1, + hashes.SHA224, + hashes.SHA256, + hashes.SHA384, + hashes.SHA512, + ) + ) + else: + return isinstance(algorithm, hashes.SHA1) + + def rsa_padding_supported(self, padding): + if isinstance(padding, PKCS1v15): + return True + elif isinstance(padding, PSS) and isinstance(padding._mgf, MGF1): + return self.hash_supported(padding._mgf._algorithm) + elif isinstance(padding, OAEP) and isinstance(padding._mgf, MGF1): + return ( + self._oaep_hash_supported(padding._mgf._algorithm) and + self._oaep_hash_supported(padding._algorithm) and + ( + (padding._label is None or len(padding._label) == 0) or + self._lib.Cryptography_HAS_RSA_OAEP_LABEL == 1 + ) + ) + else: + return False + + def generate_dsa_parameters(self, key_size): + if key_size not in (1024, 2048, 3072): + raise ValueError("Key size must be 1024 or 2048 or 3072 bits.") + + ctx = self._lib.DSA_new() + self.openssl_assert(ctx != self._ffi.NULL) + ctx = self._ffi.gc(ctx, self._lib.DSA_free) + + res = self._lib.DSA_generate_parameters_ex( + ctx, key_size, self._ffi.NULL, 0, + self._ffi.NULL, self._ffi.NULL, self._ffi.NULL + ) + + self.openssl_assert(res == 1) + + return _DSAParameters(self, ctx) + + def generate_dsa_private_key(self, parameters): + ctx = self._lib.DSAparams_dup(parameters._dsa_cdata) + self.openssl_assert(ctx != self._ffi.NULL) + ctx = self._ffi.gc(ctx, self._lib.DSA_free) + self._lib.DSA_generate_key(ctx) + evp_pkey = self._dsa_cdata_to_evp_pkey(ctx) + + return _DSAPrivateKey(self, ctx, evp_pkey) + + def generate_dsa_private_key_and_parameters(self, key_size): + parameters = self.generate_dsa_parameters(key_size) + return self.generate_dsa_private_key(parameters) + + def _dsa_cdata_set_values(self, dsa_cdata, p, q, g, pub_key, priv_key): + res = self._lib.DSA_set0_pqg(dsa_cdata, p, q, g) + self.openssl_assert(res == 1) + res = self._lib.DSA_set0_key(dsa_cdata, pub_key, priv_key) + self.openssl_assert(res == 1) + + def load_dsa_private_numbers(self, numbers): + dsa._check_dsa_private_numbers(numbers) + parameter_numbers = numbers.public_numbers.parameter_numbers + + dsa_cdata = self._lib.DSA_new() + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + + p = self._int_to_bn(parameter_numbers.p) + q = 
self._int_to_bn(parameter_numbers.q) + g = self._int_to_bn(parameter_numbers.g) + pub_key = self._int_to_bn(numbers.public_numbers.y) + priv_key = self._int_to_bn(numbers.x) + self._dsa_cdata_set_values(dsa_cdata, p, q, g, pub_key, priv_key) + + evp_pkey = self._dsa_cdata_to_evp_pkey(dsa_cdata) + + return _DSAPrivateKey(self, dsa_cdata, evp_pkey) + + def load_dsa_public_numbers(self, numbers): + dsa._check_dsa_parameters(numbers.parameter_numbers) + dsa_cdata = self._lib.DSA_new() + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + + p = self._int_to_bn(numbers.parameter_numbers.p) + q = self._int_to_bn(numbers.parameter_numbers.q) + g = self._int_to_bn(numbers.parameter_numbers.g) + pub_key = self._int_to_bn(numbers.y) + priv_key = self._ffi.NULL + self._dsa_cdata_set_values(dsa_cdata, p, q, g, pub_key, priv_key) + + evp_pkey = self._dsa_cdata_to_evp_pkey(dsa_cdata) + + return _DSAPublicKey(self, dsa_cdata, evp_pkey) + + def load_dsa_parameter_numbers(self, numbers): + dsa._check_dsa_parameters(numbers) + dsa_cdata = self._lib.DSA_new() + self.openssl_assert(dsa_cdata != self._ffi.NULL) + dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free) + + p = self._int_to_bn(numbers.p) + q = self._int_to_bn(numbers.q) + g = self._int_to_bn(numbers.g) + res = self._lib.DSA_set0_pqg(dsa_cdata, p, q, g) + self.openssl_assert(res == 1) + + return _DSAParameters(self, dsa_cdata) + + def _dsa_cdata_to_evp_pkey(self, dsa_cdata): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set1_DSA(evp_pkey, dsa_cdata) + self.openssl_assert(res == 1) + return evp_pkey + + def dsa_hash_supported(self, algorithm): + return self.hash_supported(algorithm) + + def dsa_parameters_supported(self, p, q, g): + return True + + def cmac_algorithm_supported(self, algorithm): + return self.cipher_supported( + algorithm, CBC(b"\x00" * algorithm.block_size) + ) + + def create_cmac_ctx(self, algorithm): + return _CMACContext(self, algorithm) + + def create_x509_csr(self, builder, private_key, algorithm): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError('Algorithm must be a registered hash algorithm.') + + if ( + isinstance(algorithm, hashes.MD5) and not + isinstance(private_key, rsa.RSAPrivateKey) + ): + raise ValueError( + "MD5 is not a supported hash algorithm for EC/DSA CSRs" + ) + + # Resolve the signature algorithm. + evp_md = self._lib.EVP_get_digestbyname( + algorithm.name.encode('ascii') + ) + self.openssl_assert(evp_md != self._ffi.NULL) + + # Create an empty request. + x509_req = self._lib.X509_REQ_new() + self.openssl_assert(x509_req != self._ffi.NULL) + x509_req = self._ffi.gc(x509_req, self._lib.X509_REQ_free) + + # Set x509 version. + res = self._lib.X509_REQ_set_version(x509_req, x509.Version.v1.value) + self.openssl_assert(res == 1) + + # Set subject name. + res = self._lib.X509_REQ_set_subject_name( + x509_req, _encode_name_gc(self, builder._subject_name) + ) + self.openssl_assert(res == 1) + + # Set subject public key. + public_key = private_key.public_key() + res = self._lib.X509_REQ_set_pubkey( + x509_req, public_key._evp_pkey + ) + self.openssl_assert(res == 1) + + # Add extensions. + sk_extension = self._lib.sk_X509_EXTENSION_new_null() + self.openssl_assert(sk_extension != self._ffi.NULL) + sk_extension = self._ffi.gc( + sk_extension, self._lib.sk_X509_EXTENSION_free + ) + # gc is not necessary for CSRs, as sk_X509_EXTENSION_free + # will release all the X509_EXTENSIONs. 
+ self._create_x509_extensions( + extensions=builder._extensions, + handlers=_EXTENSION_ENCODE_HANDLERS, + x509_obj=sk_extension, + add_func=self._lib.sk_X509_EXTENSION_insert, + gc=False + ) + res = self._lib.X509_REQ_add_extensions(x509_req, sk_extension) + self.openssl_assert(res == 1) + + # Sign the request using the requester's private key. + res = self._lib.X509_REQ_sign( + x509_req, private_key._evp_pkey, evp_md + ) + if res == 0: + errors = self._consume_errors() + self.openssl_assert( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_RSA, + self._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY + ) + ) + + raise ValueError("Digest too big for RSA key") + + return _CertificateSigningRequest(self, x509_req) + + def create_x509_certificate(self, builder, private_key, algorithm): + if not isinstance(builder, x509.CertificateBuilder): + raise TypeError('Builder type mismatch.') + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError('Algorithm must be a registered hash algorithm.') + + if ( + isinstance(algorithm, hashes.MD5) and not + isinstance(private_key, rsa.RSAPrivateKey) + ): + raise ValueError( + "MD5 is not a supported hash algorithm for EC/DSA certificates" + ) + + # Resolve the signature algorithm. + evp_md = self._lib.EVP_get_digestbyname( + algorithm.name.encode('ascii') + ) + self.openssl_assert(evp_md != self._ffi.NULL) + + # Create an empty certificate. + x509_cert = self._lib.X509_new() + x509_cert = self._ffi.gc(x509_cert, backend._lib.X509_free) + + # Set the x509 version. + res = self._lib.X509_set_version(x509_cert, builder._version.value) + self.openssl_assert(res == 1) + + # Set the subject's name. + res = self._lib.X509_set_subject_name( + x509_cert, _encode_name_gc(self, builder._subject_name) + ) + self.openssl_assert(res == 1) + + # Set the subject's public key. + res = self._lib.X509_set_pubkey( + x509_cert, builder._public_key._evp_pkey + ) + self.openssl_assert(res == 1) + + # Set the certificate serial number. + serial_number = _encode_asn1_int_gc(self, builder._serial_number) + res = self._lib.X509_set_serialNumber(x509_cert, serial_number) + self.openssl_assert(res == 1) + + # Set the "not before" time. + res = self._lib.ASN1_TIME_set( + self._lib.X509_get_notBefore(x509_cert), + calendar.timegm(builder._not_valid_before.timetuple()) + ) + if res == self._ffi.NULL: + self._raise_time_set_error() + + # Set the "not after" time. + res = self._lib.ASN1_TIME_set( + self._lib.X509_get_notAfter(x509_cert), + calendar.timegm(builder._not_valid_after.timetuple()) + ) + if res == self._ffi.NULL: + self._raise_time_set_error() + + # Add extensions. + self._create_x509_extensions( + extensions=builder._extensions, + handlers=_EXTENSION_ENCODE_HANDLERS, + x509_obj=x509_cert, + add_func=self._lib.X509_add_ext, + gc=True + ) + + # Set the issuer name. + res = self._lib.X509_set_issuer_name( + x509_cert, _encode_name_gc(self, builder._issuer_name) + ) + self.openssl_assert(res == 1) + + # Sign the certificate with the issuer's private key. 
+ res = self._lib.X509_sign( + x509_cert, private_key._evp_pkey, evp_md + ) + if res == 0: + errors = self._consume_errors() + self.openssl_assert( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_RSA, + self._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY + ) + ) + raise ValueError("Digest too big for RSA key") + + return _Certificate(self, x509_cert) + + def _raise_time_set_error(self): + errors = self._consume_errors() + self.openssl_assert( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_ASN1, + self._lib.ASN1_R_ERROR_GETTING_TIME + ) + ) + raise ValueError( + "Invalid time. This error can occur if you set a time too far in " + "the future on Windows." + ) + + def create_x509_crl(self, builder, private_key, algorithm): + if not isinstance(builder, x509.CertificateRevocationListBuilder): + raise TypeError('Builder type mismatch.') + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError('Algorithm must be a registered hash algorithm.') + + if ( + isinstance(algorithm, hashes.MD5) and not + isinstance(private_key, rsa.RSAPrivateKey) + ): + raise ValueError( + "MD5 is not a supported hash algorithm for EC/DSA CRLs" + ) + + evp_md = self._lib.EVP_get_digestbyname( + algorithm.name.encode('ascii') + ) + self.openssl_assert(evp_md != self._ffi.NULL) + + # Create an empty CRL. + x509_crl = self._lib.X509_CRL_new() + x509_crl = self._ffi.gc(x509_crl, backend._lib.X509_CRL_free) + + # Set the x509 CRL version. We only support v2 (integer value 1). + res = self._lib.X509_CRL_set_version(x509_crl, 1) + self.openssl_assert(res == 1) + + # Set the issuer name. + res = self._lib.X509_CRL_set_issuer_name( + x509_crl, _encode_name_gc(self, builder._issuer_name) + ) + self.openssl_assert(res == 1) + + # Set the last update time. + last_update = self._lib.ASN1_TIME_set( + self._ffi.NULL, calendar.timegm(builder._last_update.timetuple()) + ) + self.openssl_assert(last_update != self._ffi.NULL) + last_update = self._ffi.gc(last_update, self._lib.ASN1_TIME_free) + res = self._lib.X509_CRL_set_lastUpdate(x509_crl, last_update) + self.openssl_assert(res == 1) + + # Set the next update time. + next_update = self._lib.ASN1_TIME_set( + self._ffi.NULL, calendar.timegm(builder._next_update.timetuple()) + ) + self.openssl_assert(next_update != self._ffi.NULL) + next_update = self._ffi.gc(next_update, self._lib.ASN1_TIME_free) + res = self._lib.X509_CRL_set_nextUpdate(x509_crl, next_update) + self.openssl_assert(res == 1) + + # Add extensions. + self._create_x509_extensions( + extensions=builder._extensions, + handlers=_CRL_EXTENSION_ENCODE_HANDLERS, + x509_obj=x509_crl, + add_func=self._lib.X509_CRL_add_ext, + gc=True + ) + + # add revoked certificates + for revoked_cert in builder._revoked_certificates: + # Duplicating because the X509_CRL takes ownership and will free + # this memory when X509_CRL_free is called. 
+ revoked = self._lib.Cryptography_X509_REVOKED_dup( + revoked_cert._x509_revoked + ) + self.openssl_assert(revoked != self._ffi.NULL) + res = self._lib.X509_CRL_add0_revoked(x509_crl, revoked) + self.openssl_assert(res == 1) + + res = self._lib.X509_CRL_sign( + x509_crl, private_key._evp_pkey, evp_md + ) + if res == 0: + errors = self._consume_errors() + self.openssl_assert( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_RSA, + self._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY + ) + ) + raise ValueError("Digest too big for RSA key") + + return _CertificateRevocationList(self, x509_crl) + + def _create_x509_extensions(self, extensions, handlers, x509_obj, + add_func, gc): + for i, extension in enumerate(extensions): + x509_extension = self._create_x509_extension( + handlers, extension + ) + self.openssl_assert(x509_extension != self._ffi.NULL) + + if gc: + x509_extension = self._ffi.gc( + x509_extension, self._lib.X509_EXTENSION_free + ) + res = add_func(x509_obj, x509_extension, i) + self.openssl_assert(res >= 1) + + def _create_raw_x509_extension(self, extension, value): + obj = _txt2obj_gc(self, extension.oid.dotted_string) + return self._lib.X509_EXTENSION_create_by_OBJ( + self._ffi.NULL, obj, 1 if extension.critical else 0, value + ) + + def _create_x509_extension(self, handlers, extension): + if isinstance(extension.value, x509.UnrecognizedExtension): + value = _encode_asn1_str_gc( + self, extension.value.value, len(extension.value.value) + ) + return self._create_raw_x509_extension(extension, value) + elif isinstance(extension.value, x509.TLSFeature): + asn1 = _Integers([x.value for x in extension.value]).dump() + value = _encode_asn1_str_gc(self, asn1, len(asn1)) + return self._create_raw_x509_extension(extension, value) + else: + try: + encode = handlers[extension.oid] + except KeyError: + raise NotImplementedError( + 'Extension not supported: {0}'.format(extension.oid) + ) + + ext_struct = encode(self, extension.value) + nid = self._lib.OBJ_txt2nid( + extension.oid.dotted_string.encode("ascii") + ) + backend.openssl_assert(nid != self._lib.NID_undef) + return self._lib.X509V3_EXT_i2d( + nid, 1 if extension.critical else 0, ext_struct + ) + + def create_x509_revoked_certificate(self, builder): + if not isinstance(builder, x509.RevokedCertificateBuilder): + raise TypeError('Builder type mismatch.') + + x509_revoked = self._lib.X509_REVOKED_new() + self.openssl_assert(x509_revoked != self._ffi.NULL) + x509_revoked = self._ffi.gc(x509_revoked, self._lib.X509_REVOKED_free) + serial_number = _encode_asn1_int_gc(self, builder._serial_number) + res = self._lib.X509_REVOKED_set_serialNumber( + x509_revoked, serial_number + ) + self.openssl_assert(res == 1) + rev_date = self._lib.ASN1_TIME_set( + self._ffi.NULL, + calendar.timegm(builder._revocation_date.timetuple()) + ) + self.openssl_assert(rev_date != self._ffi.NULL) + rev_date = self._ffi.gc(rev_date, self._lib.ASN1_TIME_free) + res = self._lib.X509_REVOKED_set_revocationDate(x509_revoked, rev_date) + self.openssl_assert(res == 1) + # add CRL entry extensions + self._create_x509_extensions( + extensions=builder._extensions, + handlers=_CRL_ENTRY_EXTENSION_ENCODE_HANDLERS, + x509_obj=x509_revoked, + add_func=self._lib.X509_REVOKED_add_ext, + gc=True + ) + return _RevokedCertificate(self, None, x509_revoked) + + def load_pem_private_key(self, data, password): + return self._load_key( + self._lib.PEM_read_bio_PrivateKey, + self._evp_pkey_to_private_key, + data, + password, + ) + + def load_pem_public_key(self, data): + mem_bio = 
self._bytes_to_bio(data) + evp_pkey = self._lib.PEM_read_bio_PUBKEY( + mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL + ) + if evp_pkey != self._ffi.NULL: + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + return self._evp_pkey_to_public_key(evp_pkey) + else: + # It's not a (RSA/DSA/ECDSA) subjectPublicKeyInfo, but we still + # need to check to see if it is a pure PKCS1 RSA public key (not + # embedded in a subjectPublicKeyInfo) + self._consume_errors() + res = self._lib.BIO_reset(mem_bio.bio) + self.openssl_assert(res == 1) + rsa_cdata = self._lib.PEM_read_bio_RSAPublicKey( + mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL + ) + if rsa_cdata != self._ffi.NULL: + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata) + return _RSAPublicKey(self, rsa_cdata, evp_pkey) + else: + self._handle_key_loading_error() + + def load_pem_parameters(self, data): + mem_bio = self._bytes_to_bio(data) + # only DH is supported currently + dh_cdata = self._lib.PEM_read_bio_DHparams( + mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL) + if dh_cdata != self._ffi.NULL: + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + return _DHParameters(self, dh_cdata) + else: + self._handle_key_loading_error() + + def load_der_private_key(self, data, password): + # OpenSSL has a function called d2i_AutoPrivateKey that in theory + # handles this automatically, however it doesn't handle encrypted + # private keys. Instead we try to load the key two different ways. + # First we'll try to load it as a traditional key. + bio_data = self._bytes_to_bio(data) + key = self._evp_pkey_from_der_traditional_key(bio_data, password) + if key: + return self._evp_pkey_to_private_key(key) + else: + # Finally we try to load it with the method that handles encrypted + # PKCS8 properly. + return self._load_key( + self._lib.d2i_PKCS8PrivateKey_bio, + self._evp_pkey_to_private_key, + data, + password, + ) + + def _evp_pkey_from_der_traditional_key(self, bio_data, password): + key = self._lib.d2i_PrivateKey_bio(bio_data.bio, self._ffi.NULL) + if key != self._ffi.NULL: + key = self._ffi.gc(key, self._lib.EVP_PKEY_free) + if password is not None: + raise TypeError( + "Password was given but private key is not encrypted." 
+ ) + + return key + else: + self._consume_errors() + return None + + def load_der_public_key(self, data): + mem_bio = self._bytes_to_bio(data) + evp_pkey = self._lib.d2i_PUBKEY_bio(mem_bio.bio, self._ffi.NULL) + if evp_pkey != self._ffi.NULL: + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + return self._evp_pkey_to_public_key(evp_pkey) + else: + # It's not a (RSA/DSA/ECDSA) subjectPublicKeyInfo, but we still + # need to check to see if it is a pure PKCS1 RSA public key (not + # embedded in a subjectPublicKeyInfo) + self._consume_errors() + res = self._lib.BIO_reset(mem_bio.bio) + self.openssl_assert(res == 1) + rsa_cdata = self._lib.d2i_RSAPublicKey_bio( + mem_bio.bio, self._ffi.NULL + ) + if rsa_cdata != self._ffi.NULL: + rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free) + evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata) + return _RSAPublicKey(self, rsa_cdata, evp_pkey) + else: + self._handle_key_loading_error() + + def load_pem_parameters(self, data): + mem_bio = self._bytes_to_bio(data) + # only DH is supported currently + dh_cdata = self._lib.PEM_read_bio_DHparams( + mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL) + if dh_cdata != self._ffi.NULL: + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + return _DHParameters(self, dh_cdata) + else: + self._handle_key_loading_error() + + def load_der_private_key(self, data, password): + # OpenSSL has a function called d2i_AutoPrivateKey that in theory + # handles this automatically, however it doesn't handle encrypted + # private keys. Instead we try to load the key two different ways. + # First we'll try to load it as a traditional key. + bio_data = self._bytes_to_bio(data) + key = self._evp_pkey_from_der_traditional_key(bio_data, password) + if key: + return self._evp_pkey_to_private_key(key) + else: + # Finally we try to load it with the method that handles encrypted + # PKCS8 properly. + return self._load_key( + self._lib.d2i_PKCS8PrivateKey_bio, + self._evp_pkey_to_private_key, + data, + password, + ) + + def _evp_pkey_from_der_traditional_key(self, bio_data, password): + key = self._lib.d2i_PrivateKey_bio(bio_data.bio, self._ffi.NULL) + if key != self._ffi.NULL: + key = self._ffi.gc(key, self._lib.EVP_PKEY_free) + if password is not None: + raise TypeError( + "Password was given but private key is not encrypted."
) + + return key + else: + self._consume_errors() + return None + + def load_der_parameters(self, data): + mem_bio = self._bytes_to_bio(data) + dh_cdata = self._lib.d2i_DHparams_bio( + mem_bio.bio, self._ffi.NULL + ) + if dh_cdata != self._ffi.NULL: + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + return _DHParameters(self, dh_cdata) + elif self._lib.Cryptography_HAS_EVP_PKEY_DHX: + # We check to see if the data is DHX instead. + self._consume_errors() + res = self._lib.BIO_reset(mem_bio.bio) + self.openssl_assert(res == 1) + dh_cdata = self._lib.Cryptography_d2i_DHxparams_bio( + mem_bio.bio, self._ffi.NULL + ) + if dh_cdata != self._ffi.NULL: + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + return _DHParameters(self, dh_cdata) + + self._handle_key_loading_error() + + def load_der_x509_csr(self, data): + mem_bio = self._bytes_to_bio(data) + x509_req = self._lib.d2i_X509_REQ_bio(mem_bio.bio, self._ffi.NULL) + if x509_req == self._ffi.NULL: + self._consume_errors() + raise ValueError("Unable to load request") + + x509_req = self._ffi.gc(x509_req, self._lib.X509_REQ_free) + return _CertificateSigningRequest(self, x509_req) + + def _load_key(self, openssl_read_func, convert_func, data, password): + mem_bio = self._bytes_to_bio(data) + + if password is not None and not isinstance(password, bytes): + raise TypeError("Password must be bytes") + + userdata = self._ffi.new("CRYPTOGRAPHY_PASSWORD_DATA *") + if password is not None: + password_buf = self._ffi.new("char []", password) + userdata.password = password_buf + userdata.length = len(password) + + evp_pkey = openssl_read_func( + mem_bio.bio, + self._ffi.NULL, + self._ffi.addressof( + self._lib._original_lib, "Cryptography_pem_password_cb" + ), + userdata, + ) + + if evp_pkey == self._ffi.NULL: + if userdata.error != 0: + errors = self._consume_errors() + self.openssl_assert(errors) + if userdata.error == -1: + raise TypeError( + "Password was not given but private key is encrypted" + ) + else: + assert userdata.error == -2 + raise ValueError( + "Passwords longer than {0} bytes are not supported " + "by this backend.".format(userdata.maxsize - 1) + ) + else: + self._handle_key_loading_error() + + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + + if password is not None and userdata.called == 0: + raise TypeError( + "Password was given but private key is not encrypted.") + + assert ( + (password is not None and userdata.called == 1) or + password is None + ) + + return convert_func(evp_pkey) + + def _handle_key_loading_error(self): + errors = self._consume_errors() + + if not errors: + raise ValueError("Could not deserialize key data.") + + elif ( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_EVP, self._lib.EVP_R_BAD_DECRYPT + ) or errors[0]._lib_reason_match( + self._lib.ERR_LIB_PKCS12, + self._lib.PKCS12_R_PKCS12_CIPHERFINAL_ERROR + ) + ): + raise ValueError("Bad decrypt. Incorrect password?") + + elif ( + errors[0]._lib_reason_match( + self._lib.ERR_LIB_EVP, self._lib.EVP_R_UNKNOWN_PBE_ALGORITHM + ) or errors[0]._lib_reason_match( + self._lib.ERR_LIB_PEM, self._lib.PEM_R_UNSUPPORTED_ENCRYPTION + ) + ): + raise UnsupportedAlgorithm( + "PEM data is encrypted with an unsupported cipher", + _Reasons.UNSUPPORTED_CIPHER + ) + + elif any( + error._lib_reason_match( + self._lib.ERR_LIB_EVP, + self._lib.EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM + ) + for error in errors + ): + raise ValueError("Unsupported public key algorithm.") + + else: + assert errors[0].lib in ( + self._lib.ERR_LIB_EVP, + self._lib.ERR_LIB_PEM, + self._lib.ERR_LIB_ASN1, + ) + raise ValueError("Could not deserialize key data.") + + def elliptic_curve_supported(self, curve): + try: + curve_nid = self._elliptic_curve_to_nid(curve) + except UnsupportedAlgorithm: + curve_nid = self._lib.NID_undef + + group = self._lib.EC_GROUP_new_by_curve_name(curve_nid) + + if group == self._ffi.NULL: + errors = self._consume_errors() + self.openssl_assert( + curve_nid == self._lib.NID_undef or + errors[0]._lib_reason_match( + self._lib.ERR_LIB_EC, + self._lib.EC_R_UNKNOWN_GROUP + ) + ) + return False + else: + self.openssl_assert(curve_nid != self._lib.NID_undef) + self._lib.EC_GROUP_free(group) + return True + + def elliptic_curve_signature_algorithm_supported( + self, signature_algorithm, curve + ): + # We only support ECDSA right now.
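+ # For example (illustrative, not upstream code): with a typical OpenSSL + # build, ec.ECDSA(hashes.SHA256()) with ec.SECP256R1() returns True + # here, while any non-ECDSA signature algorithm short-circuits to False.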
+ if not isinstance(signature_algorithm, ec.ECDSA): + return False + + return self.elliptic_curve_supported(curve) + + def generate_elliptic_curve_private_key(self, curve): + """ + Generate a new private key on the named curve. + """ + + if self.elliptic_curve_supported(curve): + curve_nid = self._elliptic_curve_to_nid(curve) + + ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + + res = self._lib.EC_KEY_generate_key(ec_cdata) + self.openssl_assert(res == 1) + + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + + return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + else: + raise UnsupportedAlgorithm( + "Backend object does not support {0}.".format(curve.name), + _Reasons.UNSUPPORTED_ELLIPTIC_CURVE + ) + + def load_elliptic_curve_private_numbers(self, numbers): + public = numbers.public_numbers + + curve_nid = self._elliptic_curve_to_nid(public.curve) + + ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + + private_value = self._ffi.gc( + self._int_to_bn(numbers.private_value), self._lib.BN_clear_free + ) + res = self._lib.EC_KEY_set_private_key(ec_cdata, private_value) + self.openssl_assert(res == 1) + + ec_cdata = self._ec_key_set_public_key_affine_coordinates( + ec_cdata, public.x, public.y) + + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + + return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + + def load_elliptic_curve_public_numbers(self, numbers): + curve_nid = self._elliptic_curve_to_nid(numbers.curve) + + ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + + ec_cdata = self._ec_key_set_public_key_affine_coordinates( + ec_cdata, numbers.x, numbers.y) + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + + return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey) + + def derive_elliptic_curve_private_key(self, private_value, curve): + curve_nid = self._elliptic_curve_to_nid(curve) + + ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid) + self.openssl_assert(ec_cdata != self._ffi.NULL) + ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free) + + get_func, group = self._ec_key_determine_group_get_func(ec_cdata) + + point = self._lib.EC_POINT_new(group) + self.openssl_assert(point != self._ffi.NULL) + point = self._ffi.gc(point, self._lib.EC_POINT_free) + + value = self._int_to_bn(private_value) + value = self._ffi.gc(value, self._lib.BN_clear_free) + + with self._tmp_bn_ctx() as bn_ctx: + res = self._lib.EC_POINT_mul(group, point, value, self._ffi.NULL, + self._ffi.NULL, bn_ctx) + self.openssl_assert(res == 1) + + bn_x = self._lib.BN_CTX_get(bn_ctx) + bn_y = self._lib.BN_CTX_get(bn_ctx) + + res = get_func(group, point, bn_x, bn_y, bn_ctx) + self.openssl_assert(res == 1) + + res = self._lib.EC_KEY_set_public_key(ec_cdata, point) + self.openssl_assert(res == 1) + private = self._int_to_bn(private_value) + private = self._ffi.gc(private, self._lib.BN_clear_free) + res = self._lib.EC_KEY_set_private_key(ec_cdata, private) + self.openssl_assert(res == 1) + + evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata) + + return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey) + + def elliptic_curve_exchange_algorithm_supported(self, algorithm, curve): + return ( + self.elliptic_curve_supported(curve) and + isinstance(algorithm, ec.ECDH) 
+ ) + + def _ec_cdata_to_evp_pkey(self, ec_cdata): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set1_EC_KEY(evp_pkey, ec_cdata) + self.openssl_assert(res == 1) + return evp_pkey + + def _elliptic_curve_to_nid(self, curve): + """ + Get the NID for a curve name. + """ + + curve_aliases = { + "secp192r1": "prime192v1", + "secp256r1": "prime256v1" + } + + curve_name = curve_aliases.get(curve.name, curve.name) + + curve_nid = self._lib.OBJ_sn2nid(curve_name.encode()) + if curve_nid == self._lib.NID_undef: + raise UnsupportedAlgorithm( + "{0} is not a supported elliptic curve".format(curve.name), + _Reasons.UNSUPPORTED_ELLIPTIC_CURVE + ) + return curve_nid + + @contextmanager + def _tmp_bn_ctx(self): + bn_ctx = self._lib.BN_CTX_new() + self.openssl_assert(bn_ctx != self._ffi.NULL) + bn_ctx = self._ffi.gc(bn_ctx, self._lib.BN_CTX_free) + self._lib.BN_CTX_start(bn_ctx) + try: + yield bn_ctx + finally: + self._lib.BN_CTX_end(bn_ctx) + + def _ec_key_determine_group_get_func(self, ctx): + """ + Given an EC_KEY determine the group and what function is required to + get point coordinates. + """ + self.openssl_assert(ctx != self._ffi.NULL) + + nid_two_field = self._lib.OBJ_sn2nid(b"characteristic-two-field") + self.openssl_assert(nid_two_field != self._lib.NID_undef) + + group = self._lib.EC_KEY_get0_group(ctx) + self.openssl_assert(group != self._ffi.NULL) + + method = self._lib.EC_GROUP_method_of(group) + self.openssl_assert(method != self._ffi.NULL) + + nid = self._lib.EC_METHOD_get_field_type(method) + self.openssl_assert(nid != self._lib.NID_undef) + + if nid == nid_two_field and self._lib.Cryptography_HAS_EC2M: + get_func = self._lib.EC_POINT_get_affine_coordinates_GF2m + else: + get_func = self._lib.EC_POINT_get_affine_coordinates_GFp + + assert get_func + + return get_func, group + + def _ec_key_set_public_key_affine_coordinates(self, ctx, x, y): + """ + Sets the public key point in the EC_KEY context to the affine x and y + values. + """ + + if x < 0 or y < 0: + raise ValueError( + "Invalid EC key. Both x and y must be non-negative." + ) + + x = self._ffi.gc(self._int_to_bn(x), self._lib.BN_free) + y = self._ffi.gc(self._int_to_bn(y), self._lib.BN_free) + res = self._lib.EC_KEY_set_public_key_affine_coordinates(ctx, x, y) + if res != 1: + self._consume_errors() + raise ValueError("Invalid EC key.") + + return ctx + + def _private_key_bytes(self, encoding, format, encryption_algorithm, + evp_pkey, cdata): + if not isinstance(format, serialization.PrivateFormat): + raise TypeError( + "format must be an item from the PrivateFormat enum" + ) + + if not isinstance(encryption_algorithm, + serialization.KeySerializationEncryption): + raise TypeError( + "Encryption algorithm must be a KeySerializationEncryption " + "instance" + ) + + if isinstance(encryption_algorithm, serialization.NoEncryption): + password = b"" + passlen = 0 + evp_cipher = self._ffi.NULL + elif isinstance(encryption_algorithm, + serialization.BestAvailableEncryption): + # This is a curated value that we will update over time. 
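+ # For example (illustrative, not upstream code): callers reach this + # branch via private_key.private_bytes(serialization.Encoding.PEM, + # serialization.PrivateFormat.PKCS8, + # serialization.BestAvailableEncryption(b"passphrase")), which + # currently selects AES-256-CBC below.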
+ evp_cipher = self._lib.EVP_get_cipherbyname( + b"aes-256-cbc" + ) + password = encryption_algorithm.password + passlen = len(password) + if passlen > 1023: + raise ValueError( + "Passwords longer than 1023 bytes are not supported by " + "this backend" + ) + else: + raise ValueError("Unsupported encryption type") + + key_type = self._lib.EVP_PKEY_id(evp_pkey) + if encoding is serialization.Encoding.PEM: + if format is serialization.PrivateFormat.PKCS8: + write_bio = self._lib.PEM_write_bio_PKCS8PrivateKey + key = evp_pkey + else: + assert format is serialization.PrivateFormat.TraditionalOpenSSL + if key_type == self._lib.EVP_PKEY_RSA: + write_bio = self._lib.PEM_write_bio_RSAPrivateKey + elif key_type == self._lib.EVP_PKEY_DSA: + write_bio = self._lib.PEM_write_bio_DSAPrivateKey + else: + assert key_type == self._lib.EVP_PKEY_EC + write_bio = self._lib.PEM_write_bio_ECPrivateKey + + key = cdata + elif encoding is serialization.Encoding.DER: + if format is serialization.PrivateFormat.TraditionalOpenSSL: + if not isinstance( + encryption_algorithm, serialization.NoEncryption + ): + raise ValueError( + "Encryption is not supported for DER encoded " + "traditional OpenSSL keys" + ) + + return self._private_key_bytes_traditional_der(key_type, cdata) + else: + assert format is serialization.PrivateFormat.PKCS8 + write_bio = self._lib.i2d_PKCS8PrivateKey_bio + key = evp_pkey + else: + raise TypeError("encoding must be an item from the Encoding enum") + + bio = self._create_mem_bio_gc() + res = write_bio( + bio, + key, + evp_cipher, + password, + passlen, + self._ffi.NULL, + self._ffi.NULL + ) + self.openssl_assert(res == 1) + return self._read_mem_bio(bio) + + def _private_key_bytes_traditional_der(self, key_type, cdata): + if key_type == self._lib.EVP_PKEY_RSA: + write_bio = self._lib.i2d_RSAPrivateKey_bio + elif key_type == self._lib.EVP_PKEY_EC: + write_bio = self._lib.i2d_ECPrivateKey_bio + else: + self.openssl_assert(key_type == self._lib.EVP_PKEY_DSA) + write_bio = self._lib.i2d_DSAPrivateKey_bio + + bio = self._create_mem_bio_gc() + res = write_bio(bio, cdata) + self.openssl_assert(res == 1) + return self._read_mem_bio(bio) + + def _public_key_bytes(self, encoding, format, key, evp_pkey, cdata): + if not isinstance(encoding, serialization.Encoding): + raise TypeError("encoding must be an item from the Encoding enum") + + if ( + format is serialization.PublicFormat.OpenSSH or + encoding is serialization.Encoding.OpenSSH + ): + if ( + format is not serialization.PublicFormat.OpenSSH or + encoding is not serialization.Encoding.OpenSSH + ): + raise ValueError( + "OpenSSH format must be used with OpenSSH encoding" + ) + return self._openssh_public_key_bytes(key) + elif format is serialization.PublicFormat.SubjectPublicKeyInfo: + if encoding is serialization.Encoding.PEM: + write_bio = self._lib.PEM_write_bio_PUBKEY + else: + assert encoding is serialization.Encoding.DER + write_bio = self._lib.i2d_PUBKEY_bio + + key = evp_pkey + elif format is serialization.PublicFormat.PKCS1: + # Only RSA is supported here. 
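+ # For example (illustrative, not upstream code): only RSA keys can take + # this branch, e.g. public_key.public_bytes(serialization.Encoding.PEM, + # serialization.PublicFormat.PKCS1); an EC or DSA key requesting PKCS1 + # would fail the assert below.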
+ assert self._lib.EVP_PKEY_id(evp_pkey) == self._lib.EVP_PKEY_RSA + if encoding is serialization.Encoding.PEM: + write_bio = self._lib.PEM_write_bio_RSAPublicKey + else: + assert encoding is serialization.Encoding.DER + write_bio = self._lib.i2d_RSAPublicKey_bio + + key = cdata + else: + raise TypeError( + "format must be an item from the PublicFormat enum" + ) + + bio = self._create_mem_bio_gc() + res = write_bio(bio, key) + self.openssl_assert(res == 1) + return self._read_mem_bio(bio) + + def _openssh_public_key_bytes(self, key): + if isinstance(key, rsa.RSAPublicKey): + public_numbers = key.public_numbers() + return b"ssh-rsa " + base64.b64encode( + serialization._ssh_write_string(b"ssh-rsa") + + serialization._ssh_write_mpint(public_numbers.e) + + serialization._ssh_write_mpint(public_numbers.n) + ) + elif isinstance(key, dsa.DSAPublicKey): + public_numbers = key.public_numbers() + parameter_numbers = public_numbers.parameter_numbers + return b"ssh-dss " + base64.b64encode( + serialization._ssh_write_string(b"ssh-dss") + + serialization._ssh_write_mpint(parameter_numbers.p) + + serialization._ssh_write_mpint(parameter_numbers.q) + + serialization._ssh_write_mpint(parameter_numbers.g) + + serialization._ssh_write_mpint(public_numbers.y) + ) + else: + assert isinstance(key, ec.EllipticCurvePublicKey) + public_numbers = key.public_numbers() + try: + curve_name = { + ec.SECP256R1: b"nistp256", + ec.SECP384R1: b"nistp384", + ec.SECP521R1: b"nistp521", + }[type(public_numbers.curve)] + except KeyError: + raise ValueError( + "Only SECP256R1, SECP384R1, and SECP521R1 curves are " + "supported by the SSH public key format" + ) + return b"ecdsa-sha2-" + curve_name + b" " + base64.b64encode( + serialization._ssh_write_string(b"ecdsa-sha2-" + curve_name) + + serialization._ssh_write_string(curve_name) + + serialization._ssh_write_string(public_numbers.encode_point()) + ) + + def _parameter_bytes(self, encoding, format, cdata): + if encoding is serialization.Encoding.OpenSSH: + raise TypeError( + "OpenSSH encoding is not supported" + ) + + # Only DH is supported here currently. 
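+ # For example (illustrative, not upstream code): parameters produced by + # backend.generate_dh_parameters(2, 2048) serialize through this path + # via parameter_bytes(serialization.Encoding.PEM, + # serialization.ParameterFormat.PKCS3).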
+ q = self._ffi.new("BIGNUM **") + self._lib.DH_get0_pqg(cdata, + self._ffi.NULL, + q, + self._ffi.NULL) + if encoding is serialization.Encoding.PEM: + if q[0] != self._ffi.NULL: + write_bio = self._lib.PEM_write_bio_DHxparams + else: + write_bio = self._lib.PEM_write_bio_DHparams + elif encoding is serialization.Encoding.DER: + if q[0] != self._ffi.NULL: + write_bio = self._lib.Cryptography_i2d_DHxparams_bio + else: + write_bio = self._lib.i2d_DHparams_bio + else: + raise TypeError("encoding must be an item from the Encoding enum") + + bio = self._create_mem_bio_gc() + res = write_bio(bio, cdata) + self.openssl_assert(res == 1) + return self._read_mem_bio(bio) + + def generate_dh_parameters(self, generator, key_size): + if key_size < 512: + raise ValueError("DH key_size must be at least 512 bits") + + if generator not in (2, 5): + raise ValueError("DH generator must be 2 or 5") + + dh_param_cdata = self._lib.DH_new() + self.openssl_assert(dh_param_cdata != self._ffi.NULL) + dh_param_cdata = self._ffi.gc(dh_param_cdata, self._lib.DH_free) + + res = self._lib.DH_generate_parameters_ex( + dh_param_cdata, + key_size, + generator, + self._ffi.NULL + ) + self.openssl_assert(res == 1) + + return _DHParameters(self, dh_param_cdata) + + def _dh_cdata_to_evp_pkey(self, dh_cdata): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set1_DH(evp_pkey, dh_cdata) + self.openssl_assert(res == 1) + return evp_pkey + + def generate_dh_private_key(self, parameters): + dh_key_cdata = _dh_params_dup(parameters._dh_cdata, self) + + res = self._lib.DH_generate_key(dh_key_cdata) + self.openssl_assert(res == 1) + + evp_pkey = self._dh_cdata_to_evp_pkey(dh_key_cdata) + + return _DHPrivateKey(self, dh_key_cdata, evp_pkey) + + def generate_dh_private_key_and_parameters(self, generator, key_size): + return self.generate_dh_private_key( + self.generate_dh_parameters(generator, key_size)) + + def load_dh_private_numbers(self, numbers): + parameter_numbers = numbers.public_numbers.parameter_numbers + + dh_cdata = self._lib.DH_new() + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + + p = self._int_to_bn(parameter_numbers.p) + g = self._int_to_bn(parameter_numbers.g) + + if parameter_numbers.q is not None: + q = self._int_to_bn(parameter_numbers.q) + else: + q = self._ffi.NULL + + pub_key = self._int_to_bn(numbers.public_numbers.y) + priv_key = self._int_to_bn(numbers.x) + + res = self._lib.DH_set0_pqg(dh_cdata, p, q, g) + self.openssl_assert(res == 1) + + res = self._lib.DH_set0_key(dh_cdata, pub_key, priv_key) + self.openssl_assert(res == 1) + + codes = self._ffi.new("int[]", 1) + res = self._lib.Cryptography_DH_check(dh_cdata, codes) + self.openssl_assert(res == 1) + + # DH_check will return DH_NOT_SUITABLE_GENERATOR if p % 24 does not + # equal 11 when the generator is 2 (a quadratic nonresidue). + # We want to ignore that error because p % 24 == 23 is also fine. + # Specifically, g is then a quadratic residue. Within the context of + # Diffie-Hellman this means it can only generate half the possible + # values. That sounds bad, but quadratic nonresidues leak a bit of + # the key to the attacker in exchange for having the full key space + # available. See: https://crypto.stackexchange.com/questions/12961 + if codes[0] != 0 and not ( + parameter_numbers.g == 2 and + codes[0] ^ self._lib.DH_NOT_SUITABLE_GENERATOR == 0 + ): + raise ValueError( + "DH private numbers did not pass safety checks." 
+ ) + + evp_pkey = self._dh_cdata_to_evp_pkey(dh_cdata) + + return _DHPrivateKey(self, dh_cdata, evp_pkey) + + def load_dh_public_numbers(self, numbers): + dh_cdata = self._lib.DH_new() + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + + parameter_numbers = numbers.parameter_numbers + + p = self._int_to_bn(parameter_numbers.p) + g = self._int_to_bn(parameter_numbers.g) + + if parameter_numbers.q is not None: + q = self._int_to_bn(parameter_numbers.q) + else: + q = self._ffi.NULL + + pub_key = self._int_to_bn(numbers.y) + + res = self._lib.DH_set0_pqg(dh_cdata, p, q, g) + self.openssl_assert(res == 1) + + res = self._lib.DH_set0_key(dh_cdata, pub_key, self._ffi.NULL) + self.openssl_assert(res == 1) + + evp_pkey = self._dh_cdata_to_evp_pkey(dh_cdata) + + return _DHPublicKey(self, dh_cdata, evp_pkey) + + def load_dh_parameter_numbers(self, numbers): + dh_cdata = self._lib.DH_new() + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + + p = self._int_to_bn(numbers.p) + g = self._int_to_bn(numbers.g) + + if numbers.q is not None: + q = self._int_to_bn(numbers.q) + else: + q = self._ffi.NULL + + res = self._lib.DH_set0_pqg(dh_cdata, p, q, g) + self.openssl_assert(res == 1) + + return _DHParameters(self, dh_cdata) + + def dh_parameters_supported(self, p, g, q=None): + dh_cdata = self._lib.DH_new() + self.openssl_assert(dh_cdata != self._ffi.NULL) + dh_cdata = self._ffi.gc(dh_cdata, self._lib.DH_free) + + p = self._int_to_bn(p) + g = self._int_to_bn(g) + + if q is not None: + q = self._int_to_bn(q) + else: + q = self._ffi.NULL + + res = self._lib.DH_set0_pqg(dh_cdata, p, q, g) + self.openssl_assert(res == 1) + + codes = self._ffi.new("int[]", 1) + res = self._lib.Cryptography_DH_check(dh_cdata, codes) + self.openssl_assert(res == 1) + + return codes[0] == 0 + + def dh_x942_serialization_supported(self): + return self._lib.Cryptography_HAS_EVP_PKEY_DHX == 1 + + def x509_name_bytes(self, name): + x509_name = _encode_name_gc(self, name) + pp = self._ffi.new("unsigned char **") + res = self._lib.i2d_X509_NAME(x509_name, pp) + self.openssl_assert(pp[0] != self._ffi.NULL) + pp = self._ffi.gc( + pp, lambda pointer: self._lib.OPENSSL_free(pointer[0]) + ) + self.openssl_assert(res > 0) + return self._ffi.buffer(pp[0], res)[:] + + def x25519_load_public_bytes(self, data): + evp_pkey = self._create_evp_pkey_gc() + res = self._lib.EVP_PKEY_set_type(evp_pkey, self._lib.NID_X25519) + backend.openssl_assert(res == 1) + res = self._lib.EVP_PKEY_set1_tls_encodedpoint( + evp_pkey, data, len(data) + ) + backend.openssl_assert(res == 1) + return _X25519PublicKey(self, evp_pkey) + + def x25519_load_private_bytes(self, data): + # OpenSSL only has facilities for loading PKCS8 formatted private + # keys using the algorithm identifiers specified in + # https://tools.ietf.org/html/draft-ietf-curdle-pkix-09. + # This is the standard PKCS8 prefix for a 32 byte X25519 key. + # The form is: + # 0:d=0 hl=2 l= 46 cons: SEQUENCE + # 2:d=1 hl=2 l= 1 prim: INTEGER :00 + # 5:d=1 hl=2 l= 5 cons: SEQUENCE + # 7:d=2 hl=2 l= 3 prim: OBJECT :1.3.101.110 + # 12:d=1 hl=2 l= 34 prim: OCTET STRING (the key) + # Of course there's a bit more complexity. In reality OCTET STRING + # contains an OCTET STRING of length 32! So the last two bytes here + # are \x04\x20, which is an OCTET STRING of length 32. 
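+ # Worked size check (editorial note): the prefix below is 16 bytes, so + # prefix + 32 raw key bytes = 48 bytes total, matching the outer + # SEQUENCE header above (2 header bytes plus l=46 content bytes).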
+ pkcs8_prefix = b'0.\x02\x01\x000\x05\x06\x03+en\x04"\x04 ' + bio = self._bytes_to_bio(pkcs8_prefix + data) + evp_pkey = backend._lib.d2i_PrivateKey_bio(bio.bio, self._ffi.NULL) + self.openssl_assert(evp_pkey != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free) + self.openssl_assert( + self._lib.EVP_PKEY_id(evp_pkey) == self._lib.EVP_PKEY_X25519 + ) + return _X25519PrivateKey(self, evp_pkey) + + def x25519_generate_key(self): + evp_pkey_ctx = self._lib.EVP_PKEY_CTX_new_id( + self._lib.NID_X25519, self._ffi.NULL + ) + self.openssl_assert(evp_pkey_ctx != self._ffi.NULL) + evp_pkey_ctx = self._ffi.gc( + evp_pkey_ctx, self._lib.EVP_PKEY_CTX_free + ) + res = self._lib.EVP_PKEY_keygen_init(evp_pkey_ctx) + self.openssl_assert(res == 1) + evp_ppkey = self._ffi.new("EVP_PKEY **") + res = self._lib.EVP_PKEY_keygen(evp_pkey_ctx, evp_ppkey) + self.openssl_assert(res == 1) + self.openssl_assert(evp_ppkey[0] != self._ffi.NULL) + evp_pkey = self._ffi.gc(evp_ppkey[0], self._lib.EVP_PKEY_free) + return _X25519PrivateKey(self, evp_pkey) + + def x25519_supported(self): + return self._lib.CRYPTOGRAPHY_OPENSSL_110_OR_GREATER + + def derive_scrypt(self, key_material, salt, length, n, r, p): + buf = self._ffi.new("unsigned char[]", length) + res = self._lib.EVP_PBE_scrypt( + key_material, len(key_material), salt, len(salt), n, r, p, + scrypt._MEM_LIMIT, buf, length + ) + self.openssl_assert(res == 1) + return self._ffi.buffer(buf)[:] + + def aead_cipher_supported(self, cipher): + cipher_name = aead._aead_cipher_name(cipher) + return ( + self._lib.EVP_get_cipherbyname(cipher_name) != self._ffi.NULL + ) + + +class GetCipherByName(object): + def __init__(self, fmt): + self._fmt = fmt + + def __call__(self, backend, cipher, mode): + cipher_name = self._fmt.format(cipher=cipher, mode=mode).lower() + return backend._lib.EVP_get_cipherbyname(cipher_name.encode("ascii")) + + +def _get_xts_cipher(backend, cipher, mode): + cipher_name = "aes-{0}-xts".format(cipher.key_size // 2) + return backend._lib.EVP_get_cipherbyname(cipher_name.encode("ascii")) + + +backend = Backend() diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/ciphers.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/ciphers.py new file mode 100644 index 0000000..e0ee06e --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/ciphers.py @@ -0,0 +1,222 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.exceptions import InvalidTag, UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives import ciphers +from cryptography.hazmat.primitives.ciphers import modes + + +@utils.register_interface(ciphers.CipherContext) +@utils.register_interface(ciphers.AEADCipherContext) +@utils.register_interface(ciphers.AEADEncryptionContext) +@utils.register_interface(ciphers.AEADDecryptionContext) +class _CipherContext(object): + _ENCRYPT = 1 + _DECRYPT = 0 + + def __init__(self, backend, cipher, mode, operation): + self._backend = backend + self._cipher = cipher + self._mode = mode + self._operation = operation + self._tag = None + + if isinstance(self._cipher, ciphers.BlockCipherAlgorithm): + self._block_size_bytes = self._cipher.block_size // 8 + else: + self._block_size_bytes = 1 + + ctx = self._backend._lib.EVP_CIPHER_CTX_new() + ctx = self._backend._ffi.gc( + ctx, self._backend._lib.EVP_CIPHER_CTX_free + ) + + registry = self._backend._cipher_registry + try: + adapter = registry[type(cipher), type(mode)] + except KeyError: + raise UnsupportedAlgorithm( + "cipher {0} in {1} mode is not supported " + "by this backend.".format( + cipher.name, mode.name if mode else mode), + _Reasons.UNSUPPORTED_CIPHER + ) + + evp_cipher = adapter(self._backend, cipher, mode) + if evp_cipher == self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "cipher {0} in {1} mode is not supported " + "by this backend.".format( + cipher.name, mode.name if mode else mode), + _Reasons.UNSUPPORTED_CIPHER + ) + + if isinstance(mode, modes.ModeWithInitializationVector): + iv_nonce = mode.initialization_vector + elif isinstance(mode, modes.ModeWithTweak): + iv_nonce = mode.tweak + elif isinstance(mode, modes.ModeWithNonce): + iv_nonce = mode.nonce + elif isinstance(cipher, modes.ModeWithNonce): + iv_nonce = cipher.nonce + else: + iv_nonce = self._backend._ffi.NULL + # begin init with cipher and operation type + res = self._backend._lib.EVP_CipherInit_ex(ctx, evp_cipher, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + operation) + self._backend.openssl_assert(res != 0) + # set the key length to handle variable key ciphers + res = self._backend._lib.EVP_CIPHER_CTX_set_key_length( + ctx, len(cipher.key) + ) + self._backend.openssl_assert(res != 0) + if isinstance(mode, modes.GCM): + res = self._backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, self._backend._lib.EVP_CTRL_AEAD_SET_IVLEN, + len(iv_nonce), self._backend._ffi.NULL + ) + self._backend.openssl_assert(res != 0) + if mode.tag is not None: + res = self._backend._lib.EVP_CIPHER_CTX_ctrl( + ctx, self._backend._lib.EVP_CTRL_AEAD_SET_TAG, + len(mode.tag), mode.tag + ) + self._backend.openssl_assert(res != 0) + self._tag = mode.tag + elif ( + self._operation == self._DECRYPT and + self._backend._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and + not self._backend._lib.CRYPTOGRAPHY_IS_LIBRESSL + ): + raise NotImplementedError( + "delayed passing of GCM tag requires OpenSSL >= 1.0.2." + " To use this feature please update OpenSSL" + ) + + # pass key/iv + res = self._backend._lib.EVP_CipherInit_ex( + ctx, + self._backend._ffi.NULL, + self._backend._ffi.NULL, + cipher.key, + iv_nonce, + operation + ) + self._backend.openssl_assert(res != 0) + # We purposely disable padding here as it's handled higher up in the + # API. 
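+ # For example (illustrative, not upstream code): callers pad + # block-cipher plaintext themselves, e.g. with + # cryptography.hazmat.primitives.padding.PKCS7, so the EVP layer must + # not pad a second time.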
+ self._backend._lib.EVP_CIPHER_CTX_set_padding(ctx, 0) + self._ctx = ctx + + def update(self, data): + buf = bytearray(len(data) + self._block_size_bytes - 1) + n = self.update_into(data, buf) + return bytes(buf[:n]) + + def update_into(self, data, buf): + if len(buf) < (len(data) + self._block_size_bytes - 1): + raise ValueError( + "buffer must be at least {0} bytes for this " + "payload".format(len(data) + self._block_size_bytes - 1) + ) + + buf = self._backend._ffi.cast( + "unsigned char *", self._backend._ffi.from_buffer(buf) + ) + outlen = self._backend._ffi.new("int *") + res = self._backend._lib.EVP_CipherUpdate(self._ctx, buf, outlen, + data, len(data)) + self._backend.openssl_assert(res != 0) + return outlen[0] + + def finalize(self): + # OpenSSL 1.0.1 on Ubuntu 12.04 (and possibly other distributions) + # appears to have a bug where you must make at least one call to update + # even if you are only using authenticate_additional_data or the + # GCM tag will be wrong. An (empty) call to update resolves this + # and is harmless for all other versions of OpenSSL. + if isinstance(self._mode, modes.GCM): + self.update(b"") + + if ( + self._operation == self._DECRYPT and + isinstance(self._mode, modes.ModeWithAuthenticationTag) and + self.tag is None + ): + raise ValueError( + "Authentication tag must be provided when decrypting." + ) + + buf = self._backend._ffi.new("unsigned char[]", self._block_size_bytes) + outlen = self._backend._ffi.new("int *") + res = self._backend._lib.EVP_CipherFinal_ex(self._ctx, buf, outlen) + if res == 0: + errors = self._backend._consume_errors() + + if not errors and isinstance(self._mode, modes.GCM): + raise InvalidTag + + self._backend.openssl_assert( + errors[0]._lib_reason_match( + self._backend._lib.ERR_LIB_EVP, + self._backend._lib.EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH + ) + ) + raise ValueError( + "The length of the provided data is not a multiple of " + "the block length." + ) + + if (isinstance(self._mode, modes.GCM) and + self._operation == self._ENCRYPT): + tag_buf = self._backend._ffi.new( + "unsigned char[]", self._block_size_bytes + ) + res = self._backend._lib.EVP_CIPHER_CTX_ctrl( + self._ctx, self._backend._lib.EVP_CTRL_AEAD_GET_TAG, + self._block_size_bytes, tag_buf + ) + self._backend.openssl_assert(res != 0) + self._tag = self._backend._ffi.buffer(tag_buf)[:] + + res = self._backend._lib.EVP_CIPHER_CTX_cleanup(self._ctx) + self._backend.openssl_assert(res == 1) + return self._backend._ffi.buffer(buf)[:outlen[0]] + + def finalize_with_tag(self, tag): + if ( + self._backend._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and + not self._backend._lib.CRYPTOGRAPHY_IS_LIBRESSL + ): + raise NotImplementedError( + "finalize_with_tag requires OpenSSL >= 1.0.2. 
To use this " + "method please update OpenSSL" + ) + if len(tag) < self._mode._min_tag_length: + raise ValueError( + "Authentication tag must be {0} bytes or longer.".format( + self._mode._min_tag_length) + ) + res = self._backend._lib.EVP_CIPHER_CTX_ctrl( + self._ctx, self._backend._lib.EVP_CTRL_AEAD_SET_TAG, + len(tag), tag + ) + self._backend.openssl_assert(res != 0) + self._tag = tag + return self.finalize() + + def authenticate_additional_data(self, data): + outlen = self._backend._ffi.new("int *") + res = self._backend._lib.EVP_CipherUpdate( + self._ctx, self._backend._ffi.NULL, outlen, data, len(data) + ) + self._backend.openssl_assert(res != 0) + + tag = utils.read_only_property("_tag") diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/cmac.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/cmac.py new file mode 100644 index 0000000..e20f66d --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/cmac.py @@ -0,0 +1,81 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + + +from cryptography import utils +from cryptography.exceptions import ( + InvalidSignature, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.primitives import constant_time, mac +from cryptography.hazmat.primitives.ciphers.modes import CBC + + +@utils.register_interface(mac.MACContext) +class _CMACContext(object): + def __init__(self, backend, algorithm, ctx=None): + if not backend.cmac_algorithm_supported(algorithm): + raise UnsupportedAlgorithm("This backend does not support CMAC.", + _Reasons.UNSUPPORTED_CIPHER) + + self._backend = backend + self._key = algorithm.key + self._algorithm = algorithm + self._output_length = algorithm.block_size // 8 + + if ctx is None: + registry = self._backend._cipher_registry + adapter = registry[type(algorithm), CBC] + + evp_cipher = adapter(self._backend, algorithm, CBC) + + ctx = self._backend._lib.CMAC_CTX_new() + + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + ctx = self._backend._ffi.gc(ctx, self._backend._lib.CMAC_CTX_free) + + res = self._backend._lib.CMAC_Init( + ctx, self._key, len(self._key), + evp_cipher, self._backend._ffi.NULL + ) + self._backend.openssl_assert(res == 1) + + self._ctx = ctx + + algorithm = utils.read_only_property("_algorithm") + + def update(self, data): + res = self._backend._lib.CMAC_Update(self._ctx, data, len(data)) + self._backend.openssl_assert(res == 1) + + def finalize(self): + buf = self._backend._ffi.new("unsigned char[]", self._output_length) + length = self._backend._ffi.new("size_t *", self._output_length) + res = self._backend._lib.CMAC_Final( + self._ctx, buf, length + ) + self._backend.openssl_assert(res == 1) + + self._ctx = None + + return self._backend._ffi.buffer(buf)[:] + + def copy(self): + copied_ctx = self._backend._lib.CMAC_CTX_new() + copied_ctx = self._backend._ffi.gc( + copied_ctx, self._backend._lib.CMAC_CTX_free + ) + res = self._backend._lib.CMAC_CTX_copy( + copied_ctx, self._ctx + ) + self._backend.openssl_assert(res == 1) + return _CMACContext( + self._backend, self._algorithm, ctx=copied_ctx + ) + + def verify(self, signature): + digest = self.finalize() + if not constant_time.bytes_eq(digest, signature): + raise InvalidSignature("Signature did not match digest.") diff --git 
a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/decode_asn1.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/decode_asn1.py new file mode 100644 index 0000000..31fb8cf --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/decode_asn1.py @@ -0,0 +1,826 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import datetime +import ipaddress + +from asn1crypto.core import Integer, SequenceOf + +from cryptography import x509 +from cryptography.x509.extensions import _TLS_FEATURE_TYPE_TO_ENUM +from cryptography.x509.name import _ASN1_TYPE_TO_ENUM +from cryptography.x509.oid import ( + CRLEntryExtensionOID, CertificatePoliciesOID, ExtensionOID +) + + +class _Integers(SequenceOf): + _child_spec = Integer + + +def _obj2txt(backend, obj): + # Set to 80 on the recommendation of + # https://www.openssl.org/docs/crypto/OBJ_nid2ln.html#return_values + # + # But OIDs longer than this occur in real life (e.g. Active + # Directory makes some very long OIDs). So we need to detect + # and properly handle the case where the default buffer is not + # big enough. + # + buf_len = 80 + buf = backend._ffi.new("char[]", buf_len) + + # 'res' is the number of bytes that *would* be written if the + # buffer is large enough. If 'res' > buf_len - 1, we need to + # alloc a big-enough buffer and go again. + res = backend._lib.OBJ_obj2txt(buf, buf_len, obj, 1) + if res > buf_len - 1: # account for terminating null byte + buf_len = res + 1 + buf = backend._ffi.new("char[]", buf_len) + res = backend._lib.OBJ_obj2txt(buf, buf_len, obj, 1) + backend.openssl_assert(res > 0) + return backend._ffi.buffer(buf, res)[:].decode() + + +def _decode_x509_name_entry(backend, x509_name_entry): + obj = backend._lib.X509_NAME_ENTRY_get_object(x509_name_entry) + backend.openssl_assert(obj != backend._ffi.NULL) + data = backend._lib.X509_NAME_ENTRY_get_data(x509_name_entry) + backend.openssl_assert(data != backend._ffi.NULL) + value = _asn1_string_to_utf8(backend, data) + oid = _obj2txt(backend, obj) + type = _ASN1_TYPE_TO_ENUM[data.type] + + return x509.NameAttribute(x509.ObjectIdentifier(oid), value, type) + + +def _decode_x509_name(backend, x509_name): + count = backend._lib.X509_NAME_entry_count(x509_name) + attributes = [] + prev_set_id = -1 + for x in range(count): + entry = backend._lib.X509_NAME_get_entry(x509_name, x) + attribute = _decode_x509_name_entry(backend, entry) + set_id = backend._lib.Cryptography_X509_NAME_ENTRY_set(entry) + if set_id != prev_set_id: + attributes.append(set([attribute])) + else: + # is in the same RDN as a previous entry + attributes[-1].add(attribute) + prev_set_id = set_id + + return x509.Name(x509.RelativeDistinguishedName(rdn) for rdn in attributes) + + +def _decode_general_names(backend, gns): + num = backend._lib.sk_GENERAL_NAME_num(gns) + names = [] + for i in range(num): + gn = backend._lib.sk_GENERAL_NAME_value(gns, i) + backend.openssl_assert(gn != backend._ffi.NULL) + names.append(_decode_general_name(backend, gn)) + + return names + + +def _decode_general_name(backend, gn): + if gn.type == backend._lib.GEN_DNS: + # Convert to bytes and then decode to utf8. We don't use + # asn1_string_to_utf8 here because it doesn't properly convert + # utf8 from ia5strings.
+ data = _asn1_string_to_bytes(backend, gn.d.dNSName).decode("utf8") + # We don't use the constructor for DNSName so we can bypass validation + # This allows us to create DNSName objects that have unicode chars + # when a certificate (against the RFC) contains them. + return x509.DNSName._init_without_validation(data) + elif gn.type == backend._lib.GEN_URI: + # Convert to bytes and then decode to utf8. We don't use + # asn1_string_to_utf8 here because it doesn't properly convert + # utf8 from ia5strings. + data = _asn1_string_to_bytes( + backend, gn.d.uniformResourceIdentifier + ).decode("utf8") + # We don't use the constructor for URI so we can bypass validation + # This allows us to create URI objects that have unicode chars + # when a certificate (against the RFC) contains them. + return x509.UniformResourceIdentifier._init_without_validation(data) + elif gn.type == backend._lib.GEN_RID: + oid = _obj2txt(backend, gn.d.registeredID) + return x509.RegisteredID(x509.ObjectIdentifier(oid)) + elif gn.type == backend._lib.GEN_IPADD: + data = _asn1_string_to_bytes(backend, gn.d.iPAddress) + data_len = len(data) + if data_len == 8 or data_len == 32: + # This is an IPv4 or IPv6 Network and not a single IP. This + # type of data appears in Name Constraints. Unfortunately, + # ipaddress doesn't support packed bytes + netmask. Additionally, + # IPv6Network can only handle CIDR rather than the full 16 byte + # netmask. To handle this we convert the netmask to integer, then + # find the first 0 bit, which will be the prefix. If another 1 + # bit is present after that the netmask is invalid. + base = ipaddress.ip_address(data[:data_len // 2]) + netmask = ipaddress.ip_address(data[data_len // 2:]) + bits = bin(int(netmask))[2:] + prefix = bits.find('0') + # If no 0 bits are found it is a /32 or /128 + if prefix == -1: + prefix = len(bits) + + if "1" in bits[prefix:]: + raise ValueError("Invalid netmask") + + ip = ipaddress.ip_network(base.exploded + u"/{0}".format(prefix)) + else: + ip = ipaddress.ip_address(data) + + return x509.IPAddress(ip) + elif gn.type == backend._lib.GEN_DIRNAME: + return x509.DirectoryName( + _decode_x509_name(backend, gn.d.directoryName) + ) + elif gn.type == backend._lib.GEN_EMAIL: + # Convert to bytes and then decode to utf8. We don't use + # asn1_string_to_utf8 here because it doesn't properly convert + # utf8 from ia5strings. + data = _asn1_string_to_bytes(backend, gn.d.rfc822Name).decode("utf8") + # We don't use the constructor for RFC822Name so we can bypass + # validation. This allows us to create RFC822Name objects that have + # unicode chars when a certificate (against the RFC) contains them. 
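+ # For example (illustrative, not upstream code): an RFC-violating email + # SAN containing non-ASCII characters still decodes here instead of + # raising the constructor's validation error.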
+ return x509.RFC822Name._init_without_validation(data) + elif gn.type == backend._lib.GEN_OTHERNAME: + type_id = _obj2txt(backend, gn.d.otherName.type_id) + value = _asn1_to_der(backend, gn.d.otherName.value) + return x509.OtherName(x509.ObjectIdentifier(type_id), value) + else: + # x400Address or ediPartyName + raise x509.UnsupportedGeneralNameType( + "{0} is not a supported type".format( + x509._GENERAL_NAMES.get(gn.type, gn.type) + ), + gn.type + ) + + +def _decode_ocsp_no_check(backend, ext): + return x509.OCSPNoCheck() + + +def _decode_crl_number(backend, ext): + asn1_int = backend._ffi.cast("ASN1_INTEGER *", ext) + asn1_int = backend._ffi.gc(asn1_int, backend._lib.ASN1_INTEGER_free) + return x509.CRLNumber(_asn1_integer_to_int(backend, asn1_int)) + + +def _decode_delta_crl_indicator(backend, ext): + asn1_int = backend._ffi.cast("ASN1_INTEGER *", ext) + asn1_int = backend._ffi.gc(asn1_int, backend._lib.ASN1_INTEGER_free) + return x509.DeltaCRLIndicator(_asn1_integer_to_int(backend, asn1_int)) + + +class _X509ExtensionParser(object): + def __init__(self, ext_count, get_ext, handlers): + self.ext_count = ext_count + self.get_ext = get_ext + self.handlers = handlers + + def parse(self, backend, x509_obj): + extensions = [] + seen_oids = set() + for i in range(self.ext_count(backend, x509_obj)): + ext = self.get_ext(backend, x509_obj, i) + backend.openssl_assert(ext != backend._ffi.NULL) + crit = backend._lib.X509_EXTENSION_get_critical(ext) + critical = crit == 1 + oid = x509.ObjectIdentifier( + _obj2txt(backend, backend._lib.X509_EXTENSION_get_object(ext)) + ) + if oid in seen_oids: + raise x509.DuplicateExtension( + "Duplicate {0} extension found".format(oid), oid + ) + + # This OID is only supported in OpenSSL 1.1.0+ but we want + # to support it in all versions of OpenSSL so we decode it + # ourselves. 
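+ # For example (illustrative, not upstream code): an OCSP must-staple + # certificate carries TLS feature 5 (status_request), which decodes + # below to x509.TLSFeature([x509.TLSFeatureType.status_request]).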
+ if oid == ExtensionOID.TLS_FEATURE: + data = backend._lib.X509_EXTENSION_get_data(ext) + parsed = _Integers.load(_asn1_string_to_bytes(backend, data)) + value = x509.TLSFeature( + [_TLS_FEATURE_TYPE_TO_ENUM[x.native] for x in parsed] + ) + extensions.append(x509.Extension(oid, critical, value)) + seen_oids.add(oid) + continue + + try: + handler = self.handlers[oid] + except KeyError: + # Dump the DER payload into an UnrecognizedExtension object + data = backend._lib.X509_EXTENSION_get_data(ext) + backend.openssl_assert(data != backend._ffi.NULL) + der = backend._ffi.buffer(data.data, data.length)[:] + unrecognized = x509.UnrecognizedExtension(oid, der) + extensions.append( + x509.Extension(oid, critical, unrecognized) + ) + else: + ext_data = backend._lib.X509V3_EXT_d2i(ext) + if ext_data == backend._ffi.NULL: + backend._consume_errors() + raise ValueError( + "The {0} extension is invalid and can't be " + "parsed".format(oid) + ) + + value = handler(backend, ext_data) + extensions.append(x509.Extension(oid, critical, value)) + + seen_oids.add(oid) + + return x509.Extensions(extensions) + + +def _decode_certificate_policies(backend, cp): + cp = backend._ffi.cast("Cryptography_STACK_OF_POLICYINFO *", cp) + cp = backend._ffi.gc(cp, backend._lib.CERTIFICATEPOLICIES_free) + + num = backend._lib.sk_POLICYINFO_num(cp) + certificate_policies = [] + for i in range(num): + qualifiers = None + pi = backend._lib.sk_POLICYINFO_value(cp, i) + oid = x509.ObjectIdentifier(_obj2txt(backend, pi.policyid)) + if pi.qualifiers != backend._ffi.NULL: + qnum = backend._lib.sk_POLICYQUALINFO_num(pi.qualifiers) + qualifiers = [] + for j in range(qnum): + pqi = backend._lib.sk_POLICYQUALINFO_value( + pi.qualifiers, j + ) + pqualid = x509.ObjectIdentifier( + _obj2txt(backend, pqi.pqualid) + ) + if pqualid == CertificatePoliciesOID.CPS_QUALIFIER: + cpsuri = backend._ffi.buffer( + pqi.d.cpsuri.data, pqi.d.cpsuri.length + )[:].decode('ascii') + qualifiers.append(cpsuri) + else: + assert pqualid == CertificatePoliciesOID.CPS_USER_NOTICE + user_notice = _decode_user_notice( + backend, pqi.d.usernotice + ) + qualifiers.append(user_notice) + + certificate_policies.append( + x509.PolicyInformation(oid, qualifiers) + ) + + return x509.CertificatePolicies(certificate_policies) + + +def _decode_user_notice(backend, un): + explicit_text = None + notice_reference = None + + if un.exptext != backend._ffi.NULL: + explicit_text = _asn1_string_to_utf8(backend, un.exptext) + + if un.noticeref != backend._ffi.NULL: + organization = _asn1_string_to_utf8( + backend, un.noticeref.organization + ) + + num = backend._lib.sk_ASN1_INTEGER_num( + un.noticeref.noticenos + ) + notice_numbers = [] + for i in range(num): + asn1_int = backend._lib.sk_ASN1_INTEGER_value( + un.noticeref.noticenos, i + ) + notice_num = _asn1_integer_to_int(backend, asn1_int) + notice_numbers.append(notice_num) + + notice_reference = x509.NoticeReference( + organization, notice_numbers + ) + + return x509.UserNotice(notice_reference, explicit_text) + + +def _decode_basic_constraints(backend, bc_st): + basic_constraints = backend._ffi.cast("BASIC_CONSTRAINTS *", bc_st) + basic_constraints = backend._ffi.gc( + basic_constraints, backend._lib.BASIC_CONSTRAINTS_free + ) + # The byte representation of an ASN.1 boolean true is \xff. OpenSSL + # chooses to just map this to its ordinal value, so true is 255 and + # false is 0. 
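+ # For example (illustrative, not upstream code): a CA certificate + # decodes with basic_constraints.ca == 255 here, while an end-entity + # certificate carries 0.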
+ ca = basic_constraints.ca == 255 + path_length = _asn1_integer_to_int_or_none( + backend, basic_constraints.pathlen + ) + + return x509.BasicConstraints(ca, path_length) + + +def _decode_subject_key_identifier(backend, asn1_string): + asn1_string = backend._ffi.cast("ASN1_OCTET_STRING *", asn1_string) + asn1_string = backend._ffi.gc( + asn1_string, backend._lib.ASN1_OCTET_STRING_free + ) + return x509.SubjectKeyIdentifier( + backend._ffi.buffer(asn1_string.data, asn1_string.length)[:] + ) + + +def _decode_authority_key_identifier(backend, akid): + akid = backend._ffi.cast("AUTHORITY_KEYID *", akid) + akid = backend._ffi.gc(akid, backend._lib.AUTHORITY_KEYID_free) + key_identifier = None + authority_cert_issuer = None + + if akid.keyid != backend._ffi.NULL: + key_identifier = backend._ffi.buffer( + akid.keyid.data, akid.keyid.length + )[:] + + if akid.issuer != backend._ffi.NULL: + authority_cert_issuer = _decode_general_names( + backend, akid.issuer + ) + + authority_cert_serial_number = _asn1_integer_to_int_or_none( + backend, akid.serial + ) + + return x509.AuthorityKeyIdentifier( + key_identifier, authority_cert_issuer, authority_cert_serial_number + ) + + +def _decode_authority_information_access(backend, aia): + aia = backend._ffi.cast("Cryptography_STACK_OF_ACCESS_DESCRIPTION *", aia) + aia = backend._ffi.gc(aia, backend._lib.sk_ACCESS_DESCRIPTION_free) + num = backend._lib.sk_ACCESS_DESCRIPTION_num(aia) + access_descriptions = [] + for i in range(num): + ad = backend._lib.sk_ACCESS_DESCRIPTION_value(aia, i) + backend.openssl_assert(ad.method != backend._ffi.NULL) + oid = x509.ObjectIdentifier(_obj2txt(backend, ad.method)) + backend.openssl_assert(ad.location != backend._ffi.NULL) + gn = _decode_general_name(backend, ad.location) + access_descriptions.append(x509.AccessDescription(oid, gn)) + + return x509.AuthorityInformationAccess(access_descriptions) + + +def _decode_key_usage(backend, bit_string): + bit_string = backend._ffi.cast("ASN1_BIT_STRING *", bit_string) + bit_string = backend._ffi.gc(bit_string, backend._lib.ASN1_BIT_STRING_free) + get_bit = backend._lib.ASN1_BIT_STRING_get_bit + digital_signature = get_bit(bit_string, 0) == 1 + content_commitment = get_bit(bit_string, 1) == 1 + key_encipherment = get_bit(bit_string, 2) == 1 + data_encipherment = get_bit(bit_string, 3) == 1 + key_agreement = get_bit(bit_string, 4) == 1 + key_cert_sign = get_bit(bit_string, 5) == 1 + crl_sign = get_bit(bit_string, 6) == 1 + encipher_only = get_bit(bit_string, 7) == 1 + decipher_only = get_bit(bit_string, 8) == 1 + return x509.KeyUsage( + digital_signature, + content_commitment, + key_encipherment, + data_encipherment, + key_agreement, + key_cert_sign, + crl_sign, + encipher_only, + decipher_only + ) + + +def _decode_general_names_extension(backend, gns): + gns = backend._ffi.cast("GENERAL_NAMES *", gns) + gns = backend._ffi.gc(gns, backend._lib.GENERAL_NAMES_free) + general_names = _decode_general_names(backend, gns) + return general_names + + +def _decode_subject_alt_name(backend, ext): + return x509.SubjectAlternativeName( + _decode_general_names_extension(backend, ext) + ) + + +def _decode_issuer_alt_name(backend, ext): + return x509.IssuerAlternativeName( + _decode_general_names_extension(backend, ext) + ) + + +def _decode_name_constraints(backend, nc): + nc = backend._ffi.cast("NAME_CONSTRAINTS *", nc) + nc = backend._ffi.gc(nc, backend._lib.NAME_CONSTRAINTS_free) + permitted = _decode_general_subtrees(backend, nc.permittedSubtrees) + excluded = _decode_general_subtrees(backend, 
nc.excludedSubtrees) + return x509.NameConstraints( + permitted_subtrees=permitted, excluded_subtrees=excluded + ) + + +def _decode_general_subtrees(backend, stack_subtrees): + if stack_subtrees == backend._ffi.NULL: + return None + + num = backend._lib.sk_GENERAL_SUBTREE_num(stack_subtrees) + subtrees = [] + + for i in range(num): + obj = backend._lib.sk_GENERAL_SUBTREE_value(stack_subtrees, i) + backend.openssl_assert(obj != backend._ffi.NULL) + name = _decode_general_name(backend, obj.base) + subtrees.append(name) + + return subtrees + + +def _decode_policy_constraints(backend, pc): + pc = backend._ffi.cast("POLICY_CONSTRAINTS *", pc) + pc = backend._ffi.gc(pc, backend._lib.POLICY_CONSTRAINTS_free) + + require_explicit_policy = _asn1_integer_to_int_or_none( + backend, pc.requireExplicitPolicy + ) + inhibit_policy_mapping = _asn1_integer_to_int_or_none( + backend, pc.inhibitPolicyMapping + ) + + return x509.PolicyConstraints( + require_explicit_policy, inhibit_policy_mapping + ) + + +def _decode_extended_key_usage(backend, sk): + sk = backend._ffi.cast("Cryptography_STACK_OF_ASN1_OBJECT *", sk) + sk = backend._ffi.gc(sk, backend._lib.sk_ASN1_OBJECT_free) + num = backend._lib.sk_ASN1_OBJECT_num(sk) + ekus = [] + + for i in range(num): + obj = backend._lib.sk_ASN1_OBJECT_value(sk, i) + backend.openssl_assert(obj != backend._ffi.NULL) + oid = x509.ObjectIdentifier(_obj2txt(backend, obj)) + ekus.append(oid) + + return x509.ExtendedKeyUsage(ekus) + + +_DISTPOINT_TYPE_FULLNAME = 0 +_DISTPOINT_TYPE_RELATIVENAME = 1 + + +def _decode_dist_points(backend, cdps): + cdps = backend._ffi.cast("Cryptography_STACK_OF_DIST_POINT *", cdps) + cdps = backend._ffi.gc(cdps, backend._lib.CRL_DIST_POINTS_free) + + num = backend._lib.sk_DIST_POINT_num(cdps) + dist_points = [] + for i in range(num): + full_name = None + relative_name = None + crl_issuer = None + reasons = None + cdp = backend._lib.sk_DIST_POINT_value(cdps, i) + if cdp.reasons != backend._ffi.NULL: + # We will check each bit from RFC 5280 + # ReasonFlags ::= BIT STRING { + # unused (0), + # keyCompromise (1), + # cACompromise (2), + # affiliationChanged (3), + # superseded (4), + # cessationOfOperation (5), + # certificateHold (6), + # privilegeWithdrawn (7), + # aACompromise (8) } + reasons = [] + get_bit = backend._lib.ASN1_BIT_STRING_get_bit + if get_bit(cdp.reasons, 1): + reasons.append(x509.ReasonFlags.key_compromise) + + if get_bit(cdp.reasons, 2): + reasons.append(x509.ReasonFlags.ca_compromise) + + if get_bit(cdp.reasons, 3): + reasons.append(x509.ReasonFlags.affiliation_changed) + + if get_bit(cdp.reasons, 4): + reasons.append(x509.ReasonFlags.superseded) + + if get_bit(cdp.reasons, 5): + reasons.append(x509.ReasonFlags.cessation_of_operation) + + if get_bit(cdp.reasons, 6): + reasons.append(x509.ReasonFlags.certificate_hold) + + if get_bit(cdp.reasons, 7): + reasons.append(x509.ReasonFlags.privilege_withdrawn) + + if get_bit(cdp.reasons, 8): + reasons.append(x509.ReasonFlags.aa_compromise) + + reasons = frozenset(reasons) + + if cdp.CRLissuer != backend._ffi.NULL: + crl_issuer = _decode_general_names(backend, cdp.CRLissuer) + + # Certificates may have a crl_issuer/reasons and no distribution + # point so make sure it's not null. + if cdp.distpoint != backend._ffi.NULL: + # Type 0 is fullName, there is no #define for it in the code. 
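+ # For example (illustrative, not upstream code): a distribution point + # of URI:http://crl.example.com/ca.crl decodes through the fullName + # branch below as a list containing an x509.UniformResourceIdentifier.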
+ if cdp.distpoint.type == _DISTPOINT_TYPE_FULLNAME: + full_name = _decode_general_names( + backend, cdp.distpoint.name.fullname + ) + # OpenSSL code doesn't test for a specific type for + # relativename, everything that isn't fullname is considered + # relativename. Per RFC 5280: + # + # DistributionPointName ::= CHOICE { + # fullName [0] GeneralNames, + # nameRelativeToCRLIssuer [1] RelativeDistinguishedName } + else: + rns = cdp.distpoint.name.relativename + rnum = backend._lib.sk_X509_NAME_ENTRY_num(rns) + attributes = set() + for i in range(rnum): + rn = backend._lib.sk_X509_NAME_ENTRY_value( + rns, i + ) + backend.openssl_assert(rn != backend._ffi.NULL) + attributes.add( + _decode_x509_name_entry(backend, rn) + ) + + relative_name = x509.RelativeDistinguishedName(attributes) + + dist_points.append( + x509.DistributionPoint( + full_name, relative_name, reasons, crl_issuer + ) + ) + + return dist_points + + +def _decode_crl_distribution_points(backend, cdps): + dist_points = _decode_dist_points(backend, cdps) + return x509.CRLDistributionPoints(dist_points) + + +def _decode_freshest_crl(backend, cdps): + dist_points = _decode_dist_points(backend, cdps) + return x509.FreshestCRL(dist_points) + + +def _decode_inhibit_any_policy(backend, asn1_int): + asn1_int = backend._ffi.cast("ASN1_INTEGER *", asn1_int) + asn1_int = backend._ffi.gc(asn1_int, backend._lib.ASN1_INTEGER_free) + skip_certs = _asn1_integer_to_int(backend, asn1_int) + return x509.InhibitAnyPolicy(skip_certs) + + +def _decode_precert_signed_certificate_timestamps(backend, asn1_scts): + from cryptography.hazmat.backends.openssl.x509 import ( + _SignedCertificateTimestamp + ) + asn1_scts = backend._ffi.cast("Cryptography_STACK_OF_SCT *", asn1_scts) + asn1_scts = backend._ffi.gc(asn1_scts, backend._lib.SCT_LIST_free) + + scts = [] + for i in range(backend._lib.sk_SCT_num(asn1_scts)): + sct = backend._lib.sk_SCT_value(asn1_scts, i) + + scts.append(_SignedCertificateTimestamp(backend, asn1_scts, sct)) + return x509.PrecertificateSignedCertificateTimestamps(scts) + + +# CRLReason ::= ENUMERATED { +# unspecified (0), +# keyCompromise (1), +# cACompromise (2), +# affiliationChanged (3), +# superseded (4), +# cessationOfOperation (5), +# certificateHold (6), +# -- value 7 is not used +# removeFromCRL (8), +# privilegeWithdrawn (9), +# aACompromise (10) } +_CRL_ENTRY_REASON_CODE_TO_ENUM = { + 0: x509.ReasonFlags.unspecified, + 1: x509.ReasonFlags.key_compromise, + 2: x509.ReasonFlags.ca_compromise, + 3: x509.ReasonFlags.affiliation_changed, + 4: x509.ReasonFlags.superseded, + 5: x509.ReasonFlags.cessation_of_operation, + 6: x509.ReasonFlags.certificate_hold, + 8: x509.ReasonFlags.remove_from_crl, + 9: x509.ReasonFlags.privilege_withdrawn, + 10: x509.ReasonFlags.aa_compromise, +} + + +_CRL_ENTRY_REASON_ENUM_TO_CODE = { + x509.ReasonFlags.unspecified: 0, + x509.ReasonFlags.key_compromise: 1, + x509.ReasonFlags.ca_compromise: 2, + x509.ReasonFlags.affiliation_changed: 3, + x509.ReasonFlags.superseded: 4, + x509.ReasonFlags.cessation_of_operation: 5, + x509.ReasonFlags.certificate_hold: 6, + x509.ReasonFlags.remove_from_crl: 8, + x509.ReasonFlags.privilege_withdrawn: 9, + x509.ReasonFlags.aa_compromise: 10 +} + + +def _decode_crl_reason(backend, enum): + enum = backend._ffi.cast("ASN1_ENUMERATED *", enum) + enum = backend._ffi.gc(enum, backend._lib.ASN1_ENUMERATED_free) + code = backend._lib.ASN1_ENUMERATED_get(enum) + + try: + return x509.CRLReason(_CRL_ENTRY_REASON_CODE_TO_ENUM[code]) + except KeyError: + raise ValueError("Unsupported 
reason code: {0}".format(code)) + + +def _decode_invalidity_date(backend, inv_date): + generalized_time = backend._ffi.cast( + "ASN1_GENERALIZEDTIME *", inv_date + ) + generalized_time = backend._ffi.gc( + generalized_time, backend._lib.ASN1_GENERALIZEDTIME_free + ) + return x509.InvalidityDate( + _parse_asn1_generalized_time(backend, generalized_time) + ) + + +def _decode_cert_issuer(backend, gns): + gns = backend._ffi.cast("GENERAL_NAMES *", gns) + gns = backend._ffi.gc(gns, backend._lib.GENERAL_NAMES_free) + general_names = _decode_general_names(backend, gns) + return x509.CertificateIssuer(general_names) + + +def _asn1_to_der(backend, asn1_type): + buf = backend._ffi.new("unsigned char **") + res = backend._lib.i2d_ASN1_TYPE(asn1_type, buf) + backend.openssl_assert(res >= 0) + backend.openssl_assert(buf[0] != backend._ffi.NULL) + buf = backend._ffi.gc( + buf, lambda buffer: backend._lib.OPENSSL_free(buffer[0]) + ) + return backend._ffi.buffer(buf[0], res)[:] + + +def _asn1_integer_to_int(backend, asn1_int): + bn = backend._lib.ASN1_INTEGER_to_BN(asn1_int, backend._ffi.NULL) + backend.openssl_assert(bn != backend._ffi.NULL) + bn = backend._ffi.gc(bn, backend._lib.BN_free) + return backend._bn_to_int(bn) + + +def _asn1_integer_to_int_or_none(backend, asn1_int): + if asn1_int == backend._ffi.NULL: + return None + else: + return _asn1_integer_to_int(backend, asn1_int) + + +def _asn1_string_to_bytes(backend, asn1_string): + return backend._ffi.buffer(asn1_string.data, asn1_string.length)[:] + + +def _asn1_string_to_ascii(backend, asn1_string): + return _asn1_string_to_bytes(backend, asn1_string).decode("ascii") + + +def _asn1_string_to_utf8(backend, asn1_string): + buf = backend._ffi.new("unsigned char **") + res = backend._lib.ASN1_STRING_to_UTF8(buf, asn1_string) + if res == -1: + raise ValueError( + "Unsupported ASN1 string type. 
Type: {0}".format(asn1_string.type) + ) + + backend.openssl_assert(buf[0] != backend._ffi.NULL) + buf = backend._ffi.gc( + buf, lambda buffer: backend._lib.OPENSSL_free(buffer[0]) + ) + return backend._ffi.buffer(buf[0], res)[:].decode('utf8') + + +def _parse_asn1_time(backend, asn1_time): + backend.openssl_assert(asn1_time != backend._ffi.NULL) + generalized_time = backend._lib.ASN1_TIME_to_generalizedtime( + asn1_time, backend._ffi.NULL + ) + if generalized_time == backend._ffi.NULL: + raise ValueError( + "Couldn't parse ASN.1 time as generalizedtime {!r}".format( + _asn1_string_to_bytes(backend, asn1_time) + ) + ) + + generalized_time = backend._ffi.gc( + generalized_time, backend._lib.ASN1_GENERALIZEDTIME_free + ) + return _parse_asn1_generalized_time(backend, generalized_time) + + +def _parse_asn1_generalized_time(backend, generalized_time): + time = _asn1_string_to_ascii( + backend, backend._ffi.cast("ASN1_STRING *", generalized_time) + ) + return datetime.datetime.strptime(time, "%Y%m%d%H%M%SZ") + + +_EXTENSION_HANDLERS_NO_SCT = { + ExtensionOID.BASIC_CONSTRAINTS: _decode_basic_constraints, + ExtensionOID.SUBJECT_KEY_IDENTIFIER: _decode_subject_key_identifier, + ExtensionOID.KEY_USAGE: _decode_key_usage, + ExtensionOID.SUBJECT_ALTERNATIVE_NAME: _decode_subject_alt_name, + ExtensionOID.EXTENDED_KEY_USAGE: _decode_extended_key_usage, + ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _decode_authority_key_identifier, + ExtensionOID.AUTHORITY_INFORMATION_ACCESS: ( + _decode_authority_information_access + ), + ExtensionOID.CERTIFICATE_POLICIES: _decode_certificate_policies, + ExtensionOID.CRL_DISTRIBUTION_POINTS: _decode_crl_distribution_points, + ExtensionOID.FRESHEST_CRL: _decode_freshest_crl, + ExtensionOID.OCSP_NO_CHECK: _decode_ocsp_no_check, + ExtensionOID.INHIBIT_ANY_POLICY: _decode_inhibit_any_policy, + ExtensionOID.ISSUER_ALTERNATIVE_NAME: _decode_issuer_alt_name, + ExtensionOID.NAME_CONSTRAINTS: _decode_name_constraints, + ExtensionOID.POLICY_CONSTRAINTS: _decode_policy_constraints, +} +_EXTENSION_HANDLERS = _EXTENSION_HANDLERS_NO_SCT.copy() +_EXTENSION_HANDLERS[ + ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS +] = _decode_precert_signed_certificate_timestamps + + +_REVOKED_EXTENSION_HANDLERS = { + CRLEntryExtensionOID.CRL_REASON: _decode_crl_reason, + CRLEntryExtensionOID.INVALIDITY_DATE: _decode_invalidity_date, + CRLEntryExtensionOID.CERTIFICATE_ISSUER: _decode_cert_issuer, +} + +_CRL_EXTENSION_HANDLERS = { + ExtensionOID.CRL_NUMBER: _decode_crl_number, + ExtensionOID.DELTA_CRL_INDICATOR: _decode_delta_crl_indicator, + ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _decode_authority_key_identifier, + ExtensionOID.ISSUER_ALTERNATIVE_NAME: _decode_issuer_alt_name, + ExtensionOID.AUTHORITY_INFORMATION_ACCESS: ( + _decode_authority_information_access + ), +} + +_CERTIFICATE_EXTENSION_PARSER_NO_SCT = _X509ExtensionParser( + ext_count=lambda backend, x: backend._lib.X509_get_ext_count(x), + get_ext=lambda backend, x, i: backend._lib.X509_get_ext(x, i), + handlers=_EXTENSION_HANDLERS_NO_SCT +) + +_CERTIFICATE_EXTENSION_PARSER = _X509ExtensionParser( + ext_count=lambda backend, x: backend._lib.X509_get_ext_count(x), + get_ext=lambda backend, x, i: backend._lib.X509_get_ext(x, i), + handlers=_EXTENSION_HANDLERS +) + +_CSR_EXTENSION_PARSER = _X509ExtensionParser( + ext_count=lambda backend, x: backend._lib.sk_X509_EXTENSION_num(x), + get_ext=lambda backend, x, i: backend._lib.sk_X509_EXTENSION_value(x, i), + handlers=_EXTENSION_HANDLERS +) + +_REVOKED_CERTIFICATE_EXTENSION_PARSER = 
_X509ExtensionParser( + ext_count=lambda backend, x: backend._lib.X509_REVOKED_get_ext_count(x), + get_ext=lambda backend, x, i: backend._lib.X509_REVOKED_get_ext(x, i), + handlers=_REVOKED_EXTENSION_HANDLERS, +) + +_CRL_EXTENSION_PARSER = _X509ExtensionParser( + ext_count=lambda backend, x: backend._lib.X509_CRL_get_ext_count(x), + get_ext=lambda backend, x, i: backend._lib.X509_CRL_get_ext(x, i), + handlers=_CRL_EXTENSION_HANDLERS, +) diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/dh.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/dh.py new file mode 100644 index 0000000..095f062 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/dh.py @@ -0,0 +1,280 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import dh + + +def _dh_params_dup(dh_cdata, backend): + lib = backend._lib + ffi = backend._ffi + + param_cdata = lib.DHparams_dup(dh_cdata) + backend.openssl_assert(param_cdata != ffi.NULL) + param_cdata = ffi.gc(param_cdata, lib.DH_free) + if lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102: + # In OpenSSL versions < 1.0.2 or libressl DHparams_dup don't copy q + q = ffi.new("BIGNUM **") + lib.DH_get0_pqg(dh_cdata, ffi.NULL, q, ffi.NULL) + q_dup = lib.BN_dup(q[0]) + res = lib.DH_set0_pqg(param_cdata, ffi.NULL, q_dup, ffi.NULL) + backend.openssl_assert(res == 1) + + return param_cdata + + +def _dh_cdata_to_parameters(dh_cdata, backend): + param_cdata = _dh_params_dup(dh_cdata, backend) + return _DHParameters(backend, param_cdata) + + +@utils.register_interface(dh.DHParametersWithSerialization) +class _DHParameters(object): + def __init__(self, backend, dh_cdata): + self._backend = backend + self._dh_cdata = dh_cdata + + def parameter_numbers(self): + p = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + if q[0] == self._backend._ffi.NULL: + q_val = None + else: + q_val = self._backend._bn_to_int(q[0]) + return dh.DHParameterNumbers( + p=self._backend._bn_to_int(p[0]), + g=self._backend._bn_to_int(g[0]), + q=q_val + ) + + def generate_private_key(self): + return self._backend.generate_dh_private_key(self) + + def parameter_bytes(self, encoding, format): + if format is not serialization.ParameterFormat.PKCS3: + raise ValueError( + "Only PKCS3 serialization is supported" + ) + if not self._backend._lib.Cryptography_HAS_EVP_PKEY_DHX: + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, + self._backend._ffi.NULL, + q, + self._backend._ffi.NULL) + if q[0] != self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "DH X9.42 serialization is not supported", + _Reasons.UNSUPPORTED_SERIALIZATION) + + return self._backend._parameter_bytes( + encoding, + format, + self._dh_cdata + ) + + +def _handle_dh_compute_key_error(errors, backend): + lib = backend._lib + + backend.openssl_assert( + 
errors[0]._lib_reason_match( + lib.ERR_LIB_DH, lib.DH_R_INVALID_PUBKEY + ) + ) + + raise ValueError("Public key value is invalid for this exchange.") + + +def _get_dh_num_bits(backend, dh_cdata): + p = backend._ffi.new("BIGNUM **") + backend._lib.DH_get0_pqg(dh_cdata, p, + backend._ffi.NULL, + backend._ffi.NULL) + backend.openssl_assert(p[0] != backend._ffi.NULL) + return backend._lib.BN_num_bits(p[0]) + + +@utils.register_interface(dh.DHPrivateKeyWithSerialization) +class _DHPrivateKey(object): + def __init__(self, backend, dh_cdata, evp_pkey): + self._backend = backend + self._dh_cdata = dh_cdata + self._evp_pkey = evp_pkey + self._key_size_bytes = self._backend._lib.DH_size(dh_cdata) + + @property + def key_size(self): + return _get_dh_num_bits(self._backend, self._dh_cdata) + + def private_numbers(self): + p = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + if q[0] == self._backend._ffi.NULL: + q_val = None + else: + q_val = self._backend._bn_to_int(q[0]) + pub_key = self._backend._ffi.new("BIGNUM **") + priv_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_key(self._dh_cdata, pub_key, priv_key) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(priv_key[0] != self._backend._ffi.NULL) + return dh.DHPrivateNumbers( + public_numbers=dh.DHPublicNumbers( + parameter_numbers=dh.DHParameterNumbers( + p=self._backend._bn_to_int(p[0]), + g=self._backend._bn_to_int(g[0]), + q=q_val + ), + y=self._backend._bn_to_int(pub_key[0]) + ), + x=self._backend._bn_to_int(priv_key[0]) + ) + + def exchange(self, peer_public_key): + + buf = self._backend._ffi.new("unsigned char[]", self._key_size_bytes) + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_key(peer_public_key._dh_cdata, pub_key, + self._backend._ffi.NULL) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + res = self._backend._lib.DH_compute_key( + buf, + pub_key[0], + self._dh_cdata + ) + + if res == -1: + errors = self._backend._consume_errors() + return _handle_dh_compute_key_error(errors, self._backend) + else: + self._backend.openssl_assert(res >= 1) + + key = self._backend._ffi.buffer(buf)[:res] + pad = self._key_size_bytes - len(key) + + if pad > 0: + key = (b"\x00" * pad) + key + + return key + + def public_key(self): + dh_cdata = _dh_params_dup(self._dh_cdata, self._backend) + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_key(self._dh_cdata, + pub_key, self._backend._ffi.NULL) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + pub_key_dup = self._backend._lib.BN_dup(pub_key[0]) + self._backend.openssl_assert(pub_key_dup != self._backend._ffi.NULL) + + res = self._backend._lib.DH_set0_key(dh_cdata, + pub_key_dup, + self._backend._ffi.NULL) + self._backend.openssl_assert(res == 1) + evp_pkey = self._backend._dh_cdata_to_evp_pkey(dh_cdata) + return _DHPublicKey(self._backend, dh_cdata, evp_pkey) + + def parameters(self): + return _dh_cdata_to_parameters(self._dh_cdata, self._backend) + + def private_bytes(self, encoding, format, encryption_algorithm): + if format is not serialization.PrivateFormat.PKCS8: + raise ValueError( + "DH private keys support only PKCS8 serialization" + ) + if not 
self._backend._lib.Cryptography_HAS_EVP_PKEY_DHX: + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, + self._backend._ffi.NULL, + q, + self._backend._ffi.NULL) + if q[0] != self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "DH X9.42 serialization is not supported", + _Reasons.UNSUPPORTED_SERIALIZATION) + + return self._backend._private_key_bytes( + encoding, + format, + encryption_algorithm, + self._evp_pkey, + self._dh_cdata + ) + + +@utils.register_interface(dh.DHPublicKeyWithSerialization) +class _DHPublicKey(object): + def __init__(self, backend, dh_cdata, evp_pkey): + self._backend = backend + self._dh_cdata = dh_cdata + self._evp_pkey = evp_pkey + self._key_size_bits = _get_dh_num_bits(self._backend, self._dh_cdata) + + @property + def key_size(self): + return self._key_size_bits + + def public_numbers(self): + p = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + if q[0] == self._backend._ffi.NULL: + q_val = None + else: + q_val = self._backend._bn_to_int(q[0]) + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_key(self._dh_cdata, + pub_key, self._backend._ffi.NULL) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + return dh.DHPublicNumbers( + parameter_numbers=dh.DHParameterNumbers( + p=self._backend._bn_to_int(p[0]), + g=self._backend._bn_to_int(g[0]), + q=q_val + ), + y=self._backend._bn_to_int(pub_key[0]) + ) + + def parameters(self): + return _dh_cdata_to_parameters(self._dh_cdata, self._backend) + + def public_bytes(self, encoding, format): + if format is not serialization.PublicFormat.SubjectPublicKeyInfo: + raise ValueError( + "DH public keys support only " + "SubjectPublicKeyInfo serialization" + ) + + if not self._backend._lib.Cryptography_HAS_EVP_PKEY_DHX: + q = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DH_get0_pqg(self._dh_cdata, + self._backend._ffi.NULL, + q, + self._backend._ffi.NULL) + if q[0] != self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "DH X9.42 serialization is not supported", + _Reasons.UNSUPPORTED_SERIALIZATION) + + return self._backend._public_key_bytes( + encoding, + format, + self, + self._evp_pkey, + None + ) diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/dsa.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/dsa.py new file mode 100644 index 0000000..48886e4 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/dsa.py @@ -0,0 +1,269 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
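+
+# For orientation, a minimal sketch of the public DSA API that the classes
+# below implement (illustrative values; not executed by this module):
+#
+#     from cryptography.hazmat.backends import default_backend
+#     from cryptography.hazmat.primitives import hashes
+#     from cryptography.hazmat.primitives.asymmetric import dsa
+#
+#     private_key = dsa.generate_private_key(
+#         key_size=2048, backend=default_backend()
+#     )
+#     signature = private_key.sign(b"signed data", hashes.SHA256())
+#     # verify() returns None on success and raises InvalidSignature
+#     # on mismatch.
+#     private_key.public_key().verify(
+#         signature, b"signed data", hashes.SHA256()
+#     )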
+ +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.backends.openssl.utils import ( + _calculate_digest_and_algorithm, _check_not_prehashed, + _warn_sign_verify_deprecated +) +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import ( + AsymmetricSignatureContext, AsymmetricVerificationContext, dsa +) + + +def _dsa_sig_sign(backend, private_key, data): + sig_buf_len = backend._lib.DSA_size(private_key._dsa_cdata) + sig_buf = backend._ffi.new("unsigned char[]", sig_buf_len) + buflen = backend._ffi.new("unsigned int *") + + # The first parameter passed to DSA_sign is unused by OpenSSL but + # must be an integer. + res = backend._lib.DSA_sign( + 0, data, len(data), sig_buf, buflen, private_key._dsa_cdata + ) + backend.openssl_assert(res == 1) + backend.openssl_assert(buflen[0]) + + return backend._ffi.buffer(sig_buf)[:buflen[0]] + + +def _dsa_sig_verify(backend, public_key, signature, data): + # The first parameter passed to DSA_verify is unused by OpenSSL but + # must be an integer. + res = backend._lib.DSA_verify( + 0, data, len(data), signature, len(signature), public_key._dsa_cdata + ) + + if res != 1: + backend._consume_errors() + raise InvalidSignature + + +@utils.register_interface(AsymmetricVerificationContext) +class _DSAVerificationContext(object): + def __init__(self, backend, public_key, signature, algorithm): + self._backend = backend + self._public_key = public_key + self._signature = signature + self._algorithm = algorithm + + self._hash_ctx = hashes.Hash(self._algorithm, self._backend) + + def update(self, data): + self._hash_ctx.update(data) + + def verify(self): + data_to_verify = self._hash_ctx.finalize() + + _dsa_sig_verify( + self._backend, self._public_key, self._signature, data_to_verify + ) + + +@utils.register_interface(AsymmetricSignatureContext) +class _DSASignatureContext(object): + def __init__(self, backend, private_key, algorithm): + self._backend = backend + self._private_key = private_key + self._algorithm = algorithm + self._hash_ctx = hashes.Hash(self._algorithm, self._backend) + + def update(self, data): + self._hash_ctx.update(data) + + def finalize(self): + data_to_sign = self._hash_ctx.finalize() + return _dsa_sig_sign(self._backend, self._private_key, data_to_sign) + + +@utils.register_interface(dsa.DSAParametersWithNumbers) +class _DSAParameters(object): + def __init__(self, backend, dsa_cdata): + self._backend = backend + self._dsa_cdata = dsa_cdata + + def parameter_numbers(self): + p = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg(self._dsa_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(q[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + return dsa.DSAParameterNumbers( + p=self._backend._bn_to_int(p[0]), + q=self._backend._bn_to_int(q[0]), + g=self._backend._bn_to_int(g[0]) + ) + + def generate_private_key(self): + return self._backend.generate_dsa_private_key(self) + + +@utils.register_interface(dsa.DSAPrivateKeyWithSerialization) +class _DSAPrivateKey(object): + def __init__(self, backend, dsa_cdata, evp_pkey): + self._backend = backend + self._dsa_cdata = dsa_cdata + self._evp_pkey = evp_pkey + + p = self._backend._ffi.new("BIGNUM **") + 
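        # Fetch the prime p from the DSA struct; the key size this object
+        # reports is the bit length of p (BN_num_bits below). +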
self._backend._lib.DSA_get0_pqg( + dsa_cdata, p, self._backend._ffi.NULL, self._backend._ffi.NULL + ) + self._backend.openssl_assert(p[0] != backend._ffi.NULL) + self._key_size = self._backend._lib.BN_num_bits(p[0]) + + key_size = utils.read_only_property("_key_size") + + def signer(self, signature_algorithm): + _warn_sign_verify_deprecated() + _check_not_prehashed(signature_algorithm) + return _DSASignatureContext(self._backend, self, signature_algorithm) + + def private_numbers(self): + p = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + pub_key = self._backend._ffi.new("BIGNUM **") + priv_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg(self._dsa_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(q[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + self._backend._lib.DSA_get0_key(self._dsa_cdata, pub_key, priv_key) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(priv_key[0] != self._backend._ffi.NULL) + return dsa.DSAPrivateNumbers( + public_numbers=dsa.DSAPublicNumbers( + parameter_numbers=dsa.DSAParameterNumbers( + p=self._backend._bn_to_int(p[0]), + q=self._backend._bn_to_int(q[0]), + g=self._backend._bn_to_int(g[0]) + ), + y=self._backend._bn_to_int(pub_key[0]) + ), + x=self._backend._bn_to_int(priv_key[0]) + ) + + def public_key(self): + dsa_cdata = self._backend._lib.DSAparams_dup(self._dsa_cdata) + self._backend.openssl_assert(dsa_cdata != self._backend._ffi.NULL) + dsa_cdata = self._backend._ffi.gc( + dsa_cdata, self._backend._lib.DSA_free + ) + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_key( + self._dsa_cdata, pub_key, self._backend._ffi.NULL + ) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + pub_key_dup = self._backend._lib.BN_dup(pub_key[0]) + res = self._backend._lib.DSA_set0_key( + dsa_cdata, pub_key_dup, self._backend._ffi.NULL + ) + self._backend.openssl_assert(res == 1) + evp_pkey = self._backend._dsa_cdata_to_evp_pkey(dsa_cdata) + return _DSAPublicKey(self._backend, dsa_cdata, evp_pkey) + + def parameters(self): + dsa_cdata = self._backend._lib.DSAparams_dup(self._dsa_cdata) + self._backend.openssl_assert(dsa_cdata != self._backend._ffi.NULL) + dsa_cdata = self._backend._ffi.gc( + dsa_cdata, self._backend._lib.DSA_free + ) + return _DSAParameters(self._backend, dsa_cdata) + + def private_bytes(self, encoding, format, encryption_algorithm): + return self._backend._private_key_bytes( + encoding, + format, + encryption_algorithm, + self._evp_pkey, + self._dsa_cdata + ) + + def sign(self, data, algorithm): + data, algorithm = _calculate_digest_and_algorithm( + self._backend, data, algorithm + ) + return _dsa_sig_sign(self._backend, self, data) + + +@utils.register_interface(dsa.DSAPublicKeyWithSerialization) +class _DSAPublicKey(object): + def __init__(self, backend, dsa_cdata, evp_pkey): + self._backend = backend + self._dsa_cdata = dsa_cdata + self._evp_pkey = evp_pkey + p = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg( + dsa_cdata, p, self._backend._ffi.NULL, self._backend._ffi.NULL + ) + self._backend.openssl_assert(p[0] != backend._ffi.NULL) + self._key_size = self._backend._lib.BN_num_bits(p[0]) + + key_size = utils.read_only_property("_key_size") + + def verifier(self, signature, signature_algorithm): + 
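        # Deprecated streaming verify API; new code should prefer calling
+        # verify() with the whole message (implemented further below). +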
_warn_sign_verify_deprecated() + if not isinstance(signature, bytes): + raise TypeError("signature must be bytes.") + + _check_not_prehashed(signature_algorithm) + return _DSAVerificationContext( + self._backend, self, signature, signature_algorithm + ) + + def public_numbers(self): + p = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + g = self._backend._ffi.new("BIGNUM **") + pub_key = self._backend._ffi.new("BIGNUM **") + self._backend._lib.DSA_get0_pqg(self._dsa_cdata, p, q, g) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(q[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(g[0] != self._backend._ffi.NULL) + self._backend._lib.DSA_get0_key( + self._dsa_cdata, pub_key, self._backend._ffi.NULL + ) + self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL) + return dsa.DSAPublicNumbers( + parameter_numbers=dsa.DSAParameterNumbers( + p=self._backend._bn_to_int(p[0]), + q=self._backend._bn_to_int(q[0]), + g=self._backend._bn_to_int(g[0]) + ), + y=self._backend._bn_to_int(pub_key[0]) + ) + + def parameters(self): + dsa_cdata = self._backend._lib.DSAparams_dup(self._dsa_cdata) + dsa_cdata = self._backend._ffi.gc( + dsa_cdata, self._backend._lib.DSA_free + ) + return _DSAParameters(self._backend, dsa_cdata) + + def public_bytes(self, encoding, format): + if format is serialization.PublicFormat.PKCS1: + raise ValueError( + "DSA public keys do not support PKCS1 serialization" + ) + + return self._backend._public_key_bytes( + encoding, + format, + self, + self._evp_pkey, + None + ) + + def verify(self, signature, data, algorithm): + data, algorithm = _calculate_digest_and_algorithm( + self._backend, data, algorithm + ) + return _dsa_sig_verify(self._backend, self, signature, data) diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/ec.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/ec.py new file mode 100644 index 0000000..69da234 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/ec.py @@ -0,0 +1,298 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.exceptions import ( + InvalidSignature, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.openssl.utils import ( + _calculate_digest_and_algorithm, _check_not_prehashed, + _warn_sign_verify_deprecated +) +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import ( + AsymmetricSignatureContext, AsymmetricVerificationContext, ec +) + + +def _check_signature_algorithm(signature_algorithm): + if not isinstance(signature_algorithm, ec.ECDSA): + raise UnsupportedAlgorithm( + "Unsupported elliptic curve signature algorithm.", + _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM) + + +def _ec_key_curve_sn(backend, ec_key): + group = backend._lib.EC_KEY_get0_group(ec_key) + backend.openssl_assert(group != backend._ffi.NULL) + + nid = backend._lib.EC_GROUP_get_curve_name(group) + # The following check is to find EC keys with unnamed curves and raise + # an error for now. 
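+    # EC_GROUP_get_curve_name returns NID_undef when the group carries
+    # explicit parameters instead of a named-curve OID.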
+ if nid == backend._lib.NID_undef: + raise NotImplementedError( + "ECDSA certificates with unnamed curves are unsupported " + "at this time" + ) + + curve_name = backend._lib.OBJ_nid2sn(nid) + backend.openssl_assert(curve_name != backend._ffi.NULL) + + sn = backend._ffi.string(curve_name).decode('ascii') + return sn + + +def _mark_asn1_named_ec_curve(backend, ec_cdata): + """ + Set the named curve flag on the EC_KEY. This causes OpenSSL to + serialize EC keys along with their curve OID which makes + deserialization easier. + """ + + backend._lib.EC_KEY_set_asn1_flag( + ec_cdata, backend._lib.OPENSSL_EC_NAMED_CURVE + ) + + +def _sn_to_elliptic_curve(backend, sn): + try: + return ec._CURVE_TYPES[sn]() + except KeyError: + raise UnsupportedAlgorithm( + "{0} is not a supported elliptic curve".format(sn), + _Reasons.UNSUPPORTED_ELLIPTIC_CURVE + ) + + +def _ecdsa_sig_sign(backend, private_key, data): + max_size = backend._lib.ECDSA_size(private_key._ec_key) + backend.openssl_assert(max_size > 0) + + sigbuf = backend._ffi.new("unsigned char[]", max_size) + siglen_ptr = backend._ffi.new("unsigned int[]", 1) + res = backend._lib.ECDSA_sign( + 0, data, len(data), sigbuf, siglen_ptr, private_key._ec_key + ) + backend.openssl_assert(res == 1) + return backend._ffi.buffer(sigbuf)[:siglen_ptr[0]] + + +def _ecdsa_sig_verify(backend, public_key, signature, data): + res = backend._lib.ECDSA_verify( + 0, data, len(data), signature, len(signature), public_key._ec_key + ) + if res != 1: + backend._consume_errors() + raise InvalidSignature + + +@utils.register_interface(AsymmetricSignatureContext) +class _ECDSASignatureContext(object): + def __init__(self, backend, private_key, algorithm): + self._backend = backend + self._private_key = private_key + self._digest = hashes.Hash(algorithm, backend) + + def update(self, data): + self._digest.update(data) + + def finalize(self): + digest = self._digest.finalize() + + return _ecdsa_sig_sign(self._backend, self._private_key, digest) + + +@utils.register_interface(AsymmetricVerificationContext) +class _ECDSAVerificationContext(object): + def __init__(self, backend, public_key, signature, algorithm): + self._backend = backend + self._public_key = public_key + self._signature = signature + self._digest = hashes.Hash(algorithm, backend) + + def update(self, data): + self._digest.update(data) + + def verify(self): + digest = self._digest.finalize() + _ecdsa_sig_verify( + self._backend, self._public_key, self._signature, digest + ) + + +@utils.register_interface(ec.EllipticCurvePrivateKeyWithSerialization) +class _EllipticCurvePrivateKey(object): + def __init__(self, backend, ec_key_cdata, evp_pkey): + self._backend = backend + _mark_asn1_named_ec_curve(backend, ec_key_cdata) + self._ec_key = ec_key_cdata + self._evp_pkey = evp_pkey + + sn = _ec_key_curve_sn(backend, ec_key_cdata) + self._curve = _sn_to_elliptic_curve(backend, sn) + + curve = utils.read_only_property("_curve") + + @property + def key_size(self): + return self.curve.key_size + + def signer(self, signature_algorithm): + _warn_sign_verify_deprecated() + _check_signature_algorithm(signature_algorithm) + _check_not_prehashed(signature_algorithm.algorithm) + return _ECDSASignatureContext( + self._backend, self, signature_algorithm.algorithm + ) + + def exchange(self, algorithm, peer_public_key): + if not ( + self._backend.elliptic_curve_exchange_algorithm_supported( + algorithm, self.curve + ) + ): + raise UnsupportedAlgorithm( + "This backend does not support the ECDH algorithm.", + 
_Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM + ) + + if peer_public_key.curve.name != self.curve.name: + raise ValueError( + "peer_public_key and self are not on the same curve" + ) + + group = self._backend._lib.EC_KEY_get0_group(self._ec_key) + z_len = (self._backend._lib.EC_GROUP_get_degree(group) + 7) // 8 + self._backend.openssl_assert(z_len > 0) + z_buf = self._backend._ffi.new("uint8_t[]", z_len) + peer_key = self._backend._lib.EC_KEY_get0_public_key( + peer_public_key._ec_key + ) + + r = self._backend._lib.ECDH_compute_key( + z_buf, z_len, peer_key, self._ec_key, self._backend._ffi.NULL + ) + self._backend.openssl_assert(r > 0) + return self._backend._ffi.buffer(z_buf)[:z_len] + + def public_key(self): + group = self._backend._lib.EC_KEY_get0_group(self._ec_key) + self._backend.openssl_assert(group != self._backend._ffi.NULL) + + curve_nid = self._backend._lib.EC_GROUP_get_curve_name(group) + + public_ec_key = self._backend._lib.EC_KEY_new_by_curve_name(curve_nid) + self._backend.openssl_assert(public_ec_key != self._backend._ffi.NULL) + public_ec_key = self._backend._ffi.gc( + public_ec_key, self._backend._lib.EC_KEY_free + ) + + point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key) + self._backend.openssl_assert(point != self._backend._ffi.NULL) + + res = self._backend._lib.EC_KEY_set_public_key(public_ec_key, point) + self._backend.openssl_assert(res == 1) + + evp_pkey = self._backend._ec_cdata_to_evp_pkey(public_ec_key) + + return _EllipticCurvePublicKey(self._backend, public_ec_key, evp_pkey) + + def private_numbers(self): + bn = self._backend._lib.EC_KEY_get0_private_key(self._ec_key) + private_value = self._backend._bn_to_int(bn) + return ec.EllipticCurvePrivateNumbers( + private_value=private_value, + public_numbers=self.public_key().public_numbers() + ) + + def private_bytes(self, encoding, format, encryption_algorithm): + return self._backend._private_key_bytes( + encoding, + format, + encryption_algorithm, + self._evp_pkey, + self._ec_key + ) + + def sign(self, data, signature_algorithm): + _check_signature_algorithm(signature_algorithm) + data, algorithm = _calculate_digest_and_algorithm( + self._backend, data, signature_algorithm._algorithm + ) + return _ecdsa_sig_sign(self._backend, self, data) + + +@utils.register_interface(ec.EllipticCurvePublicKeyWithSerialization) +class _EllipticCurvePublicKey(object): + def __init__(self, backend, ec_key_cdata, evp_pkey): + self._backend = backend + _mark_asn1_named_ec_curve(backend, ec_key_cdata) + self._ec_key = ec_key_cdata + self._evp_pkey = evp_pkey + + sn = _ec_key_curve_sn(backend, ec_key_cdata) + self._curve = _sn_to_elliptic_curve(backend, sn) + + curve = utils.read_only_property("_curve") + + @property + def key_size(self): + return self.curve.key_size + + def verifier(self, signature, signature_algorithm): + _warn_sign_verify_deprecated() + if not isinstance(signature, bytes): + raise TypeError("signature must be bytes.") + + _check_signature_algorithm(signature_algorithm) + _check_not_prehashed(signature_algorithm.algorithm) + return _ECDSAVerificationContext( + self._backend, self, signature, signature_algorithm.algorithm + ) + + def public_numbers(self): + get_func, group = ( + self._backend._ec_key_determine_group_get_func(self._ec_key) + ) + point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key) + self._backend.openssl_assert(point != self._backend._ffi.NULL) + + with self._backend._tmp_bn_ctx() as bn_ctx: + bn_x = self._backend._lib.BN_CTX_get(bn_ctx) + bn_y = self._backend._lib.BN_CTX_get(bn_ctx) 
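+            # get_func, selected by the backend for the curve's field type
+            # (prime vs. binary), fills bn_x/bn_y with the affine
+            # coordinates of the public point.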
+ + res = get_func(group, point, bn_x, bn_y, bn_ctx) + self._backend.openssl_assert(res == 1) + + x = self._backend._bn_to_int(bn_x) + y = self._backend._bn_to_int(bn_y) + + return ec.EllipticCurvePublicNumbers( + x=x, + y=y, + curve=self._curve + ) + + def public_bytes(self, encoding, format): + if format is serialization.PublicFormat.PKCS1: + raise ValueError( + "EC public keys do not support PKCS1 serialization" + ) + + return self._backend._public_key_bytes( + encoding, + format, + self, + self._evp_pkey, + None + ) + + def verify(self, signature, data, signature_algorithm): + _check_signature_algorithm(signature_algorithm) + data, algorithm = _calculate_digest_and_algorithm( + self._backend, data, signature_algorithm._algorithm + ) + _ecdsa_sig_verify(self._backend, self, signature, data) diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/encode_asn1.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/encode_asn1.py new file mode 100644 index 0000000..a2c7ed7 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/encode_asn1.py @@ -0,0 +1,611 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import calendar +import ipaddress + +import six + +from cryptography import utils, x509 +from cryptography.hazmat.backends.openssl.decode_asn1 import ( + _CRL_ENTRY_REASON_ENUM_TO_CODE, _DISTPOINT_TYPE_FULLNAME, + _DISTPOINT_TYPE_RELATIVENAME +) +from cryptography.x509.name import _ASN1Type +from cryptography.x509.oid import CRLEntryExtensionOID, ExtensionOID + + +def _encode_asn1_int(backend, x): + """ + Converts a python integer to an ASN1_INTEGER. The returned ASN1_INTEGER + will not be garbage collected (to support adding them to structs that take + ownership of the object). Be sure to register it for GC if it will be + discarded after use. + + """ + # Convert Python integer to OpenSSL "bignum" in case value exceeds + # machine's native integer limits (note: `int_to_bn` doesn't automatically + # GC). + i = backend._int_to_bn(x) + i = backend._ffi.gc(i, backend._lib.BN_free) + + # Wrap in an ASN.1 integer. Don't GC -- as documented. + i = backend._lib.BN_to_ASN1_INTEGER(i, backend._ffi.NULL) + backend.openssl_assert(i != backend._ffi.NULL) + return i + + +def _encode_asn1_int_gc(backend, x): + i = _encode_asn1_int(backend, x) + i = backend._ffi.gc(i, backend._lib.ASN1_INTEGER_free) + return i + + +def _encode_asn1_str(backend, data, length): + """ + Create an ASN1_OCTET_STRING from a Python byte string. + """ + s = backend._lib.ASN1_OCTET_STRING_new() + res = backend._lib.ASN1_OCTET_STRING_set(s, data, length) + backend.openssl_assert(res == 1) + return s + + +def _encode_asn1_utf8_str(backend, string): + """ + Create an ASN1_UTF8STRING from a Python unicode string. + This object will be an ASN1_STRING with UTF8 type in OpenSSL and + can be decoded with ASN1_STRING_to_UTF8. 
+ """ + s = backend._lib.ASN1_UTF8STRING_new() + res = backend._lib.ASN1_STRING_set( + s, string.encode("utf8"), len(string.encode("utf8")) + ) + backend.openssl_assert(res == 1) + return s + + +def _encode_asn1_str_gc(backend, data, length): + s = _encode_asn1_str(backend, data, length) + s = backend._ffi.gc(s, backend._lib.ASN1_OCTET_STRING_free) + return s + + +def _encode_inhibit_any_policy(backend, inhibit_any_policy): + return _encode_asn1_int_gc(backend, inhibit_any_policy.skip_certs) + + +def _encode_name(backend, name): + """ + The X509_NAME created will not be gc'd. Use _encode_name_gc if needed. + """ + subject = backend._lib.X509_NAME_new() + for rdn in name.rdns: + set_flag = 0 # indicate whether to add to last RDN or create new RDN + for attribute in rdn: + name_entry = _encode_name_entry(backend, attribute) + # X509_NAME_add_entry dups the object so we need to gc this copy + name_entry = backend._ffi.gc( + name_entry, backend._lib.X509_NAME_ENTRY_free + ) + res = backend._lib.X509_NAME_add_entry( + subject, name_entry, -1, set_flag) + backend.openssl_assert(res == 1) + set_flag = -1 + return subject + + +def _encode_name_gc(backend, attributes): + subject = _encode_name(backend, attributes) + subject = backend._ffi.gc(subject, backend._lib.X509_NAME_free) + return subject + + +def _encode_sk_name_entry(backend, attributes): + """ + The sk_X509_NAME_ENTRY created will not be gc'd. + """ + stack = backend._lib.sk_X509_NAME_ENTRY_new_null() + for attribute in attributes: + name_entry = _encode_name_entry(backend, attribute) + res = backend._lib.sk_X509_NAME_ENTRY_push(stack, name_entry) + backend.openssl_assert(res == 1) + return stack + + +def _encode_name_entry(backend, attribute): + if attribute._type is _ASN1Type.BMPString: + value = attribute.value.encode('utf_16_be') + else: + value = attribute.value.encode('utf8') + + obj = _txt2obj_gc(backend, attribute.oid.dotted_string) + + name_entry = backend._lib.X509_NAME_ENTRY_create_by_OBJ( + backend._ffi.NULL, obj, attribute._type.value, value, len(value) + ) + return name_entry + + +def _encode_crl_number_delta_crl_indicator(backend, ext): + return _encode_asn1_int_gc(backend, ext.crl_number) + + +def _encode_crl_reason(backend, crl_reason): + asn1enum = backend._lib.ASN1_ENUMERATED_new() + backend.openssl_assert(asn1enum != backend._ffi.NULL) + asn1enum = backend._ffi.gc(asn1enum, backend._lib.ASN1_ENUMERATED_free) + res = backend._lib.ASN1_ENUMERATED_set( + asn1enum, _CRL_ENTRY_REASON_ENUM_TO_CODE[crl_reason.reason] + ) + backend.openssl_assert(res == 1) + + return asn1enum + + +def _encode_invalidity_date(backend, invalidity_date): + time = backend._lib.ASN1_GENERALIZEDTIME_set( + backend._ffi.NULL, calendar.timegm( + invalidity_date.invalidity_date.timetuple() + ) + ) + backend.openssl_assert(time != backend._ffi.NULL) + time = backend._ffi.gc(time, backend._lib.ASN1_GENERALIZEDTIME_free) + + return time + + +def _encode_certificate_policies(backend, certificate_policies): + cp = backend._lib.sk_POLICYINFO_new_null() + backend.openssl_assert(cp != backend._ffi.NULL) + cp = backend._ffi.gc(cp, backend._lib.sk_POLICYINFO_free) + for policy_info in certificate_policies: + pi = backend._lib.POLICYINFO_new() + backend.openssl_assert(pi != backend._ffi.NULL) + res = backend._lib.sk_POLICYINFO_push(cp, pi) + backend.openssl_assert(res >= 1) + oid = _txt2obj(backend, policy_info.policy_identifier.dotted_string) + pi.policyid = oid + if policy_info.policy_qualifiers: + pqis = backend._lib.sk_POLICYQUALINFO_new_null() + 
backend.openssl_assert(pqis != backend._ffi.NULL) + for qualifier in policy_info.policy_qualifiers: + pqi = backend._lib.POLICYQUALINFO_new() + backend.openssl_assert(pqi != backend._ffi.NULL) + res = backend._lib.sk_POLICYQUALINFO_push(pqis, pqi) + backend.openssl_assert(res >= 1) + if isinstance(qualifier, six.text_type): + pqi.pqualid = _txt2obj( + backend, x509.OID_CPS_QUALIFIER.dotted_string + ) + pqi.d.cpsuri = _encode_asn1_str( + backend, + qualifier.encode("ascii"), + len(qualifier.encode("ascii")) + ) + else: + assert isinstance(qualifier, x509.UserNotice) + pqi.pqualid = _txt2obj( + backend, x509.OID_CPS_USER_NOTICE.dotted_string + ) + un = backend._lib.USERNOTICE_new() + backend.openssl_assert(un != backend._ffi.NULL) + pqi.d.usernotice = un + if qualifier.explicit_text: + un.exptext = _encode_asn1_utf8_str( + backend, qualifier.explicit_text + ) + + un.noticeref = _encode_notice_reference( + backend, qualifier.notice_reference + ) + + pi.qualifiers = pqis + + return cp + + +def _encode_notice_reference(backend, notice): + if notice is None: + return backend._ffi.NULL + else: + nr = backend._lib.NOTICEREF_new() + backend.openssl_assert(nr != backend._ffi.NULL) + # organization is a required field + nr.organization = _encode_asn1_utf8_str(backend, notice.organization) + + notice_stack = backend._lib.sk_ASN1_INTEGER_new_null() + nr.noticenos = notice_stack + for number in notice.notice_numbers: + num = _encode_asn1_int(backend, number) + res = backend._lib.sk_ASN1_INTEGER_push(notice_stack, num) + backend.openssl_assert(res >= 1) + + return nr + + +def _txt2obj(backend, name): + """ + Converts a Python string with an ASN.1 object ID in dotted form to a + ASN1_OBJECT. + """ + name = name.encode('ascii') + obj = backend._lib.OBJ_txt2obj(name, 1) + backend.openssl_assert(obj != backend._ffi.NULL) + return obj + + +def _txt2obj_gc(backend, name): + obj = _txt2obj(backend, name) + obj = backend._ffi.gc(obj, backend._lib.ASN1_OBJECT_free) + return obj + + +def _encode_ocsp_nocheck(backend, ext): + """ + The OCSP No Check extension is defined as a null ASN.1 value embedded in + an ASN.1 string. 
+ """ + return _encode_asn1_str_gc(backend, b"\x05\x00", 2) + + +def _encode_key_usage(backend, key_usage): + set_bit = backend._lib.ASN1_BIT_STRING_set_bit + ku = backend._lib.ASN1_BIT_STRING_new() + ku = backend._ffi.gc(ku, backend._lib.ASN1_BIT_STRING_free) + res = set_bit(ku, 0, key_usage.digital_signature) + backend.openssl_assert(res == 1) + res = set_bit(ku, 1, key_usage.content_commitment) + backend.openssl_assert(res == 1) + res = set_bit(ku, 2, key_usage.key_encipherment) + backend.openssl_assert(res == 1) + res = set_bit(ku, 3, key_usage.data_encipherment) + backend.openssl_assert(res == 1) + res = set_bit(ku, 4, key_usage.key_agreement) + backend.openssl_assert(res == 1) + res = set_bit(ku, 5, key_usage.key_cert_sign) + backend.openssl_assert(res == 1) + res = set_bit(ku, 6, key_usage.crl_sign) + backend.openssl_assert(res == 1) + if key_usage.key_agreement: + res = set_bit(ku, 7, key_usage.encipher_only) + backend.openssl_assert(res == 1) + res = set_bit(ku, 8, key_usage.decipher_only) + backend.openssl_assert(res == 1) + else: + res = set_bit(ku, 7, 0) + backend.openssl_assert(res == 1) + res = set_bit(ku, 8, 0) + backend.openssl_assert(res == 1) + + return ku + + +def _encode_authority_key_identifier(backend, authority_keyid): + akid = backend._lib.AUTHORITY_KEYID_new() + backend.openssl_assert(akid != backend._ffi.NULL) + akid = backend._ffi.gc(akid, backend._lib.AUTHORITY_KEYID_free) + if authority_keyid.key_identifier is not None: + akid.keyid = _encode_asn1_str( + backend, + authority_keyid.key_identifier, + len(authority_keyid.key_identifier) + ) + + if authority_keyid.authority_cert_issuer is not None: + akid.issuer = _encode_general_names( + backend, authority_keyid.authority_cert_issuer + ) + + if authority_keyid.authority_cert_serial_number is not None: + akid.serial = _encode_asn1_int( + backend, authority_keyid.authority_cert_serial_number + ) + + return akid + + +def _encode_basic_constraints(backend, basic_constraints): + constraints = backend._lib.BASIC_CONSTRAINTS_new() + constraints = backend._ffi.gc( + constraints, backend._lib.BASIC_CONSTRAINTS_free + ) + constraints.ca = 255 if basic_constraints.ca else 0 + if basic_constraints.ca and basic_constraints.path_length is not None: + constraints.pathlen = _encode_asn1_int( + backend, basic_constraints.path_length + ) + + return constraints + + +def _encode_authority_information_access(backend, authority_info_access): + aia = backend._lib.sk_ACCESS_DESCRIPTION_new_null() + backend.openssl_assert(aia != backend._ffi.NULL) + aia = backend._ffi.gc( + aia, backend._lib.sk_ACCESS_DESCRIPTION_free + ) + for access_description in authority_info_access: + ad = backend._lib.ACCESS_DESCRIPTION_new() + method = _txt2obj( + backend, access_description.access_method.dotted_string + ) + gn = _encode_general_name(backend, access_description.access_location) + ad.method = method + ad.location = gn + res = backend._lib.sk_ACCESS_DESCRIPTION_push(aia, ad) + backend.openssl_assert(res >= 1) + + return aia + + +def _encode_general_names(backend, names): + general_names = backend._lib.GENERAL_NAMES_new() + backend.openssl_assert(general_names != backend._ffi.NULL) + for name in names: + gn = _encode_general_name(backend, name) + res = backend._lib.sk_GENERAL_NAME_push(general_names, gn) + backend.openssl_assert(res != 0) + + return general_names + + +def _encode_alt_name(backend, san): + general_names = _encode_general_names(backend, san) + general_names = backend._ffi.gc( + general_names, backend._lib.GENERAL_NAMES_free + ) + 
return general_names + + +def _encode_subject_key_identifier(backend, ski): + return _encode_asn1_str_gc(backend, ski.digest, len(ski.digest)) + + +def _encode_general_name(backend, name): + if isinstance(name, x509.DNSName): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + gn.type = backend._lib.GEN_DNS + + ia5 = backend._lib.ASN1_IA5STRING_new() + backend.openssl_assert(ia5 != backend._ffi.NULL) + # ia5strings are supposed to be ITU T.50 but to allow round-tripping + # of broken certs that encode utf8 we'll encode utf8 here too. + value = name.value.encode("utf8") + + res = backend._lib.ASN1_STRING_set(ia5, value, len(value)) + backend.openssl_assert(res == 1) + gn.d.dNSName = ia5 + elif isinstance(name, x509.RegisteredID): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + gn.type = backend._lib.GEN_RID + obj = backend._lib.OBJ_txt2obj( + name.value.dotted_string.encode('ascii'), 1 + ) + backend.openssl_assert(obj != backend._ffi.NULL) + gn.d.registeredID = obj + elif isinstance(name, x509.DirectoryName): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + dir_name = _encode_name(backend, name.value) + gn.type = backend._lib.GEN_DIRNAME + gn.d.directoryName = dir_name + elif isinstance(name, x509.IPAddress): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + if isinstance(name.value, ipaddress.IPv4Network): + packed = ( + name.value.network_address.packed + + utils.int_to_bytes(((1 << 32) - name.value.num_addresses), 4) + ) + elif isinstance(name.value, ipaddress.IPv6Network): + packed = ( + name.value.network_address.packed + + utils.int_to_bytes((1 << 128) - name.value.num_addresses, 16) + ) + else: + packed = name.value.packed + ipaddr = _encode_asn1_str(backend, packed, len(packed)) + gn.type = backend._lib.GEN_IPADD + gn.d.iPAddress = ipaddr + elif isinstance(name, x509.OtherName): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + other_name = backend._lib.OTHERNAME_new() + backend.openssl_assert(other_name != backend._ffi.NULL) + + type_id = backend._lib.OBJ_txt2obj( + name.type_id.dotted_string.encode('ascii'), 1 + ) + backend.openssl_assert(type_id != backend._ffi.NULL) + data = backend._ffi.new("unsigned char[]", name.value) + data_ptr_ptr = backend._ffi.new("unsigned char **") + data_ptr_ptr[0] = data + value = backend._lib.d2i_ASN1_TYPE( + backend._ffi.NULL, data_ptr_ptr, len(name.value) + ) + if value == backend._ffi.NULL: + backend._consume_errors() + raise ValueError("Invalid ASN.1 data") + other_name.type_id = type_id + other_name.value = value + gn.type = backend._lib.GEN_OTHERNAME + gn.d.otherName = other_name + elif isinstance(name, x509.RFC822Name): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + # ia5strings are supposed to be ITU T.50 but to allow round-tripping + # of broken certs that encode utf8 we'll encode utf8 here too. + data = name.value.encode("utf8") + asn1_str = _encode_asn1_str(backend, data, len(data)) + gn.type = backend._lib.GEN_EMAIL + gn.d.rfc822Name = asn1_str + elif isinstance(name, x509.UniformResourceIdentifier): + gn = backend._lib.GENERAL_NAME_new() + backend.openssl_assert(gn != backend._ffi.NULL) + # ia5strings are supposed to be ITU T.50 but to allow round-tripping + # of broken certs that encode utf8 we'll encode utf8 here too. 
+ data = name.value.encode("utf8") + asn1_str = _encode_asn1_str(backend, data, len(data)) + gn.type = backend._lib.GEN_URI + gn.d.uniformResourceIdentifier = asn1_str + else: + raise ValueError( + "{0} is an unknown GeneralName type".format(name) + ) + + return gn + + +def _encode_extended_key_usage(backend, extended_key_usage): + eku = backend._lib.sk_ASN1_OBJECT_new_null() + eku = backend._ffi.gc(eku, backend._lib.sk_ASN1_OBJECT_free) + for oid in extended_key_usage: + obj = _txt2obj(backend, oid.dotted_string) + res = backend._lib.sk_ASN1_OBJECT_push(eku, obj) + backend.openssl_assert(res >= 1) + + return eku + + +_CRLREASONFLAGS = { + x509.ReasonFlags.key_compromise: 1, + x509.ReasonFlags.ca_compromise: 2, + x509.ReasonFlags.affiliation_changed: 3, + x509.ReasonFlags.superseded: 4, + x509.ReasonFlags.cessation_of_operation: 5, + x509.ReasonFlags.certificate_hold: 6, + x509.ReasonFlags.privilege_withdrawn: 7, + x509.ReasonFlags.aa_compromise: 8, +} + + +def _encode_cdps_freshest_crl(backend, cdps): + cdp = backend._lib.sk_DIST_POINT_new_null() + cdp = backend._ffi.gc(cdp, backend._lib.sk_DIST_POINT_free) + for point in cdps: + dp = backend._lib.DIST_POINT_new() + backend.openssl_assert(dp != backend._ffi.NULL) + + if point.reasons: + bitmask = backend._lib.ASN1_BIT_STRING_new() + backend.openssl_assert(bitmask != backend._ffi.NULL) + dp.reasons = bitmask + for reason in point.reasons: + res = backend._lib.ASN1_BIT_STRING_set_bit( + bitmask, _CRLREASONFLAGS[reason], 1 + ) + backend.openssl_assert(res == 1) + + if point.full_name: + dpn = backend._lib.DIST_POINT_NAME_new() + backend.openssl_assert(dpn != backend._ffi.NULL) + dpn.type = _DISTPOINT_TYPE_FULLNAME + dpn.name.fullname = _encode_general_names(backend, point.full_name) + dp.distpoint = dpn + + if point.relative_name: + dpn = backend._lib.DIST_POINT_NAME_new() + backend.openssl_assert(dpn != backend._ffi.NULL) + dpn.type = _DISTPOINT_TYPE_RELATIVENAME + relativename = _encode_sk_name_entry(backend, point.relative_name) + backend.openssl_assert(relativename != backend._ffi.NULL) + dpn.name.relativename = relativename + dp.distpoint = dpn + + if point.crl_issuer: + dp.CRLissuer = _encode_general_names(backend, point.crl_issuer) + + res = backend._lib.sk_DIST_POINT_push(cdp, dp) + backend.openssl_assert(res >= 1) + + return cdp + + +def _encode_name_constraints(backend, name_constraints): + nc = backend._lib.NAME_CONSTRAINTS_new() + backend.openssl_assert(nc != backend._ffi.NULL) + nc = backend._ffi.gc(nc, backend._lib.NAME_CONSTRAINTS_free) + permitted = _encode_general_subtree( + backend, name_constraints.permitted_subtrees + ) + nc.permittedSubtrees = permitted + excluded = _encode_general_subtree( + backend, name_constraints.excluded_subtrees + ) + nc.excludedSubtrees = excluded + + return nc + + +def _encode_policy_constraints(backend, policy_constraints): + pc = backend._lib.POLICY_CONSTRAINTS_new() + backend.openssl_assert(pc != backend._ffi.NULL) + pc = backend._ffi.gc(pc, backend._lib.POLICY_CONSTRAINTS_free) + if policy_constraints.require_explicit_policy is not None: + pc.requireExplicitPolicy = _encode_asn1_int( + backend, policy_constraints.require_explicit_policy + ) + + if policy_constraints.inhibit_policy_mapping is not None: + pc.inhibitPolicyMapping = _encode_asn1_int( + backend, policy_constraints.inhibit_policy_mapping + ) + + return pc + + +def _encode_general_subtree(backend, subtrees): + if subtrees is None: + return backend._ffi.NULL + else: + general_subtrees = backend._lib.sk_GENERAL_SUBTREE_new_null() + 
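        # Note: this stack is deliberately not registered for GC here;
+        # ownership is assumed to pass to the enclosing NAME_CONSTRAINTS,
+        # which is freed by its caller. +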
for name in subtrees: + gs = backend._lib.GENERAL_SUBTREE_new() + gs.base = _encode_general_name(backend, name) + res = backend._lib.sk_GENERAL_SUBTREE_push(general_subtrees, gs) + assert res >= 1 + + return general_subtrees + + +_EXTENSION_ENCODE_HANDLERS = { + ExtensionOID.BASIC_CONSTRAINTS: _encode_basic_constraints, + ExtensionOID.SUBJECT_KEY_IDENTIFIER: _encode_subject_key_identifier, + ExtensionOID.KEY_USAGE: _encode_key_usage, + ExtensionOID.SUBJECT_ALTERNATIVE_NAME: _encode_alt_name, + ExtensionOID.ISSUER_ALTERNATIVE_NAME: _encode_alt_name, + ExtensionOID.EXTENDED_KEY_USAGE: _encode_extended_key_usage, + ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _encode_authority_key_identifier, + ExtensionOID.CERTIFICATE_POLICIES: _encode_certificate_policies, + ExtensionOID.AUTHORITY_INFORMATION_ACCESS: ( + _encode_authority_information_access + ), + ExtensionOID.CRL_DISTRIBUTION_POINTS: _encode_cdps_freshest_crl, + ExtensionOID.FRESHEST_CRL: _encode_cdps_freshest_crl, + ExtensionOID.INHIBIT_ANY_POLICY: _encode_inhibit_any_policy, + ExtensionOID.OCSP_NO_CHECK: _encode_ocsp_nocheck, + ExtensionOID.NAME_CONSTRAINTS: _encode_name_constraints, + ExtensionOID.POLICY_CONSTRAINTS: _encode_policy_constraints, +} + +_CRL_EXTENSION_ENCODE_HANDLERS = { + ExtensionOID.ISSUER_ALTERNATIVE_NAME: _encode_alt_name, + ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _encode_authority_key_identifier, + ExtensionOID.AUTHORITY_INFORMATION_ACCESS: ( + _encode_authority_information_access + ), + ExtensionOID.CRL_NUMBER: _encode_crl_number_delta_crl_indicator, + ExtensionOID.DELTA_CRL_INDICATOR: _encode_crl_number_delta_crl_indicator, +} + +_CRL_ENTRY_EXTENSION_ENCODE_HANDLERS = { + CRLEntryExtensionOID.CERTIFICATE_ISSUER: _encode_alt_name, + CRLEntryExtensionOID.CRL_REASON: _encode_crl_reason, + CRLEntryExtensionOID.INVALIDITY_DATE: _encode_invalidity_date, +} diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/hashes.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/hashes.py new file mode 100644 index 0000000..92ea53b --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/hashes.py @@ -0,0 +1,61 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
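+
+# For orientation, a minimal sketch of the public hashing API that
+# _HashContext below implements (illustrative values; not executed here):
+#
+#     from cryptography.hazmat.backends import default_backend
+#     from cryptography.hazmat.primitives import hashes
+#
+#     digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
+#     digest.update(b"abc")
+#     digest.finalize()  # 32-byte SHA-256 digest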
+ +from __future__ import absolute_import, division, print_function + + +from cryptography import utils +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.primitives import hashes + + +@utils.register_interface(hashes.HashContext) +class _HashContext(object): + def __init__(self, backend, algorithm, ctx=None): + self._algorithm = algorithm + + self._backend = backend + + if ctx is None: + ctx = self._backend._lib.Cryptography_EVP_MD_CTX_new() + ctx = self._backend._ffi.gc( + ctx, self._backend._lib.Cryptography_EVP_MD_CTX_free + ) + name = self._backend._build_openssl_digest_name(algorithm) + evp_md = self._backend._lib.EVP_get_digestbyname(name) + if evp_md == self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "{0} is not a supported hash on this backend.".format( + name), + _Reasons.UNSUPPORTED_HASH + ) + res = self._backend._lib.EVP_DigestInit_ex(ctx, evp_md, + self._backend._ffi.NULL) + self._backend.openssl_assert(res != 0) + + self._ctx = ctx + + algorithm = utils.read_only_property("_algorithm") + + def copy(self): + copied_ctx = self._backend._lib.Cryptography_EVP_MD_CTX_new() + copied_ctx = self._backend._ffi.gc( + copied_ctx, self._backend._lib.Cryptography_EVP_MD_CTX_free + ) + res = self._backend._lib.EVP_MD_CTX_copy_ex(copied_ctx, self._ctx) + self._backend.openssl_assert(res != 0) + return _HashContext(self._backend, self.algorithm, ctx=copied_ctx) + + def update(self, data): + res = self._backend._lib.EVP_DigestUpdate(self._ctx, data, len(data)) + self._backend.openssl_assert(res != 0) + + def finalize(self): + buf = self._backend._ffi.new("unsigned char[]", + self._backend._lib.EVP_MAX_MD_SIZE) + outlen = self._backend._ffi.new("unsigned int *") + res = self._backend._lib.EVP_DigestFinal_ex(self._ctx, buf, outlen) + self._backend.openssl_assert(res != 0) + self._backend.openssl_assert(outlen[0] == self.algorithm.digest_size) + return self._backend._ffi.buffer(buf)[:outlen[0]] diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/hmac.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/hmac.py new file mode 100644 index 0000000..3577f47 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/hmac.py @@ -0,0 +1,73 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
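# --- Editor's note: illustrative sketch, not part of the vendored diff. ---
# _HashContext above is the OpenSSL backend behind the public hashes API.
# A minimal usage sketch; in this 2.3.x version the backend argument is
# still required:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes

digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(b"hello ")
digest.update(b"world")
assert len(digest.finalize()) == 32  # SHA-256 digest_size is 32 bytes
# ---------------------------------------------------------------------------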
+ +from __future__ import absolute_import, division, print_function + + +from cryptography import utils +from cryptography.exceptions import ( + InvalidSignature, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.primitives import constant_time, hashes, mac + + +@utils.register_interface(mac.MACContext) +@utils.register_interface(hashes.HashContext) +class _HMACContext(object): + def __init__(self, backend, key, algorithm, ctx=None): + self._algorithm = algorithm + self._backend = backend + + if ctx is None: + ctx = self._backend._lib.Cryptography_HMAC_CTX_new() + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + ctx = self._backend._ffi.gc( + ctx, self._backend._lib.Cryptography_HMAC_CTX_free + ) + name = self._backend._build_openssl_digest_name(algorithm) + evp_md = self._backend._lib.EVP_get_digestbyname(name) + if evp_md == self._backend._ffi.NULL: + raise UnsupportedAlgorithm( + "{0} is not a supported hash on this backend".format(name), + _Reasons.UNSUPPORTED_HASH + ) + res = self._backend._lib.HMAC_Init_ex( + ctx, key, len(key), evp_md, self._backend._ffi.NULL + ) + self._backend.openssl_assert(res != 0) + + self._ctx = ctx + self._key = key + + algorithm = utils.read_only_property("_algorithm") + + def copy(self): + copied_ctx = self._backend._lib.Cryptography_HMAC_CTX_new() + self._backend.openssl_assert(copied_ctx != self._backend._ffi.NULL) + copied_ctx = self._backend._ffi.gc( + copied_ctx, self._backend._lib.Cryptography_HMAC_CTX_free + ) + res = self._backend._lib.HMAC_CTX_copy(copied_ctx, self._ctx) + self._backend.openssl_assert(res != 0) + return _HMACContext( + self._backend, self._key, self.algorithm, ctx=copied_ctx + ) + + def update(self, data): + res = self._backend._lib.HMAC_Update(self._ctx, data, len(data)) + self._backend.openssl_assert(res != 0) + + def finalize(self): + buf = self._backend._ffi.new("unsigned char[]", + self._backend._lib.EVP_MAX_MD_SIZE) + outlen = self._backend._ffi.new("unsigned int *") + res = self._backend._lib.HMAC_Final(self._ctx, buf, outlen) + self._backend.openssl_assert(res != 0) + self._backend.openssl_assert(outlen[0] == self.algorithm.digest_size) + return self._backend._ffi.buffer(buf)[:outlen[0]] + + def verify(self, signature): + digest = self.finalize() + if not constant_time.bytes_eq(digest, signature): + raise InvalidSignature("Signature did not match digest.") diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/rsa.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/rsa.py new file mode 100644 index 0000000..9a7bfaa --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/rsa.py @@ -0,0 +1,475 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
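# --- Editor's note: illustrative sketch, not part of the vendored diff. ---
# _HMACContext above backs the public hmac API; its verify() relies on the
# constant-time comparison shown in the class. A minimal sketch (the
# all-zero key is illustrative only; use a random key in practice):
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, hmac

key = b"\x00" * 32
h = hmac.HMAC(key, hashes.SHA256(), backend=default_backend())
h.update(b"payload")
tag = h.finalize()

check = hmac.HMAC(key, hashes.SHA256(), backend=default_backend())
check.update(b"payload")
check.verify(tag)  # raises InvalidSignature on mismatch
# ---------------------------------------------------------------------------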
+ +from __future__ import absolute_import, division, print_function + +import math + +from cryptography import utils +from cryptography.exceptions import ( + InvalidSignature, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.openssl.utils import ( + _calculate_digest_and_algorithm, _check_not_prehashed, + _warn_sign_verify_deprecated +) +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.asymmetric import ( + AsymmetricSignatureContext, AsymmetricVerificationContext, rsa +) +from cryptography.hazmat.primitives.asymmetric.padding import ( + AsymmetricPadding, MGF1, OAEP, PKCS1v15, PSS, calculate_max_pss_salt_length +) +from cryptography.hazmat.primitives.asymmetric.rsa import ( + RSAPrivateKeyWithSerialization, RSAPublicKeyWithSerialization +) + + +def _get_rsa_pss_salt_length(pss, key, hash_algorithm): + salt = pss._salt_length + + if salt is MGF1.MAX_LENGTH or salt is PSS.MAX_LENGTH: + return calculate_max_pss_salt_length(key, hash_algorithm) + else: + return salt + + +def _enc_dec_rsa(backend, key, data, padding): + if not isinstance(padding, AsymmetricPadding): + raise TypeError("Padding must be an instance of AsymmetricPadding.") + + if isinstance(padding, PKCS1v15): + padding_enum = backend._lib.RSA_PKCS1_PADDING + elif isinstance(padding, OAEP): + padding_enum = backend._lib.RSA_PKCS1_OAEP_PADDING + + if not isinstance(padding._mgf, MGF1): + raise UnsupportedAlgorithm( + "Only MGF1 is supported by this backend.", + _Reasons.UNSUPPORTED_MGF + ) + + if not backend.rsa_padding_supported(padding): + raise UnsupportedAlgorithm( + "This combination of padding and hash algorithm is not " + "supported by this backend.", + _Reasons.UNSUPPORTED_PADDING + ) + + else: + raise UnsupportedAlgorithm( + "{0} is not supported by this backend.".format( + padding.name + ), + _Reasons.UNSUPPORTED_PADDING + ) + + return _enc_dec_rsa_pkey_ctx(backend, key, data, padding_enum, padding) + + +def _enc_dec_rsa_pkey_ctx(backend, key, data, padding_enum, padding): + if isinstance(key, _RSAPublicKey): + init = backend._lib.EVP_PKEY_encrypt_init + crypt = backend._lib.EVP_PKEY_encrypt + else: + init = backend._lib.EVP_PKEY_decrypt_init + crypt = backend._lib.EVP_PKEY_decrypt + + pkey_ctx = backend._lib.EVP_PKEY_CTX_new( + key._evp_pkey, backend._ffi.NULL + ) + backend.openssl_assert(pkey_ctx != backend._ffi.NULL) + pkey_ctx = backend._ffi.gc(pkey_ctx, backend._lib.EVP_PKEY_CTX_free) + res = init(pkey_ctx) + backend.openssl_assert(res == 1) + res = backend._lib.EVP_PKEY_CTX_set_rsa_padding( + pkey_ctx, padding_enum) + backend.openssl_assert(res > 0) + buf_size = backend._lib.EVP_PKEY_size(key._evp_pkey) + backend.openssl_assert(buf_size > 0) + if ( + isinstance(padding, OAEP) and + backend._lib.Cryptography_HAS_RSA_OAEP_MD + ): + mgf1_md = backend._lib.EVP_get_digestbyname( + padding._mgf._algorithm.name.encode("ascii")) + backend.openssl_assert(mgf1_md != backend._ffi.NULL) + res = backend._lib.EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf1_md) + backend.openssl_assert(res > 0) + oaep_md = backend._lib.EVP_get_digestbyname( + padding._algorithm.name.encode("ascii")) + backend.openssl_assert(oaep_md != backend._ffi.NULL) + res = backend._lib.EVP_PKEY_CTX_set_rsa_oaep_md(pkey_ctx, oaep_md) + backend.openssl_assert(res > 0) + + if ( + isinstance(padding, OAEP) and + padding._label is not None and + len(padding._label) > 0 + ): + # set0_rsa_oaep_label takes ownership of the char * so we need to + # copy it into some new memory + labelptr = 
backend._lib.OPENSSL_malloc(len(padding._label)) + backend.openssl_assert(labelptr != backend._ffi.NULL) + backend._ffi.memmove(labelptr, padding._label, len(padding._label)) + res = backend._lib.EVP_PKEY_CTX_set0_rsa_oaep_label( + pkey_ctx, labelptr, len(padding._label) + ) + backend.openssl_assert(res == 1) + + outlen = backend._ffi.new("size_t *", buf_size) + buf = backend._ffi.new("unsigned char[]", buf_size) + res = crypt(pkey_ctx, buf, outlen, data, len(data)) + if res <= 0: + _handle_rsa_enc_dec_error(backend, key) + + return backend._ffi.buffer(buf)[:outlen[0]] + + +def _handle_rsa_enc_dec_error(backend, key): + errors = backend._consume_errors() + backend.openssl_assert(errors) + assert errors[0].lib == backend._lib.ERR_LIB_RSA + if isinstance(key, _RSAPublicKey): + assert (errors[0].reason == + backend._lib.RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE) + raise ValueError( + "Data too long for key size. Encrypt less data or use a " + "larger key size." + ) + else: + decoding_errors = [ + backend._lib.RSA_R_BLOCK_TYPE_IS_NOT_01, + backend._lib.RSA_R_BLOCK_TYPE_IS_NOT_02, + backend._lib.RSA_R_OAEP_DECODING_ERROR, + # Though this error looks similar to the + # RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE, this occurs on decrypts, + # rather than on encrypts + backend._lib.RSA_R_DATA_TOO_LARGE_FOR_MODULUS, + ] + if backend._lib.Cryptography_HAS_RSA_R_PKCS_DECODING_ERROR: + decoding_errors.append(backend._lib.RSA_R_PKCS_DECODING_ERROR) + + assert errors[0].reason in decoding_errors + raise ValueError("Decryption failed.") + + +def _rsa_sig_determine_padding(backend, key, padding, algorithm): + if not isinstance(padding, AsymmetricPadding): + raise TypeError("Expected provider of AsymmetricPadding.") + + pkey_size = backend._lib.EVP_PKEY_size(key._evp_pkey) + backend.openssl_assert(pkey_size > 0) + + if isinstance(padding, PKCS1v15): + padding_enum = backend._lib.RSA_PKCS1_PADDING + elif isinstance(padding, PSS): + if not isinstance(padding._mgf, MGF1): + raise UnsupportedAlgorithm( + "Only MGF1 is supported by this backend.", + _Reasons.UNSUPPORTED_MGF + ) + + # Size of key in bytes - 2 is the maximum + # PSS signature length (salt length is checked later) + if pkey_size - algorithm.digest_size - 2 < 0: + raise ValueError("Digest too large for key size. 
Use a larger " + "key or different digest.") + + padding_enum = backend._lib.RSA_PKCS1_PSS_PADDING + else: + raise UnsupportedAlgorithm( + "{0} is not supported by this backend.".format(padding.name), + _Reasons.UNSUPPORTED_PADDING + ) + + return padding_enum + + +def _rsa_sig_setup(backend, padding, algorithm, key, data, init_func): + padding_enum = _rsa_sig_determine_padding(backend, key, padding, algorithm) + evp_md = backend._lib.EVP_get_digestbyname(algorithm.name.encode("ascii")) + backend.openssl_assert(evp_md != backend._ffi.NULL) + pkey_ctx = backend._lib.EVP_PKEY_CTX_new(key._evp_pkey, backend._ffi.NULL) + backend.openssl_assert(pkey_ctx != backend._ffi.NULL) + pkey_ctx = backend._ffi.gc(pkey_ctx, backend._lib.EVP_PKEY_CTX_free) + res = init_func(pkey_ctx) + backend.openssl_assert(res == 1) + res = backend._lib.EVP_PKEY_CTX_set_signature_md(pkey_ctx, evp_md) + backend.openssl_assert(res > 0) + res = backend._lib.EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, padding_enum) + backend.openssl_assert(res > 0) + if isinstance(padding, PSS): + res = backend._lib.EVP_PKEY_CTX_set_rsa_pss_saltlen( + pkey_ctx, _get_rsa_pss_salt_length(padding, key, algorithm) + ) + backend.openssl_assert(res > 0) + + mgf1_md = backend._lib.EVP_get_digestbyname( + padding._mgf._algorithm.name.encode("ascii") + ) + backend.openssl_assert(mgf1_md != backend._ffi.NULL) + res = backend._lib.EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf1_md) + backend.openssl_assert(res > 0) + + return pkey_ctx + + +def _rsa_sig_sign(backend, padding, algorithm, private_key, data): + pkey_ctx = _rsa_sig_setup( + backend, padding, algorithm, private_key, data, + backend._lib.EVP_PKEY_sign_init + ) + buflen = backend._ffi.new("size_t *") + res = backend._lib.EVP_PKEY_sign( + pkey_ctx, + backend._ffi.NULL, + buflen, + data, + len(data) + ) + backend.openssl_assert(res == 1) + buf = backend._ffi.new("unsigned char[]", buflen[0]) + res = backend._lib.EVP_PKEY_sign( + pkey_ctx, buf, buflen, data, len(data)) + if res != 1: + errors = backend._consume_errors() + assert errors[0].lib == backend._lib.ERR_LIB_RSA + reason = None + if (errors[0].reason == + backend._lib.RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE): + reason = ("Salt length too long for key size. Try using " + "MAX_LENGTH instead.") + else: + assert (errors[0].reason == + backend._lib.RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY) + reason = "Digest too large for key size. Use a larger key." + assert reason is not None + raise ValueError(reason) + + return backend._ffi.buffer(buf)[:] + + +def _rsa_sig_verify(backend, padding, algorithm, public_key, signature, data): + pkey_ctx = _rsa_sig_setup( + backend, padding, algorithm, public_key, data, + backend._lib.EVP_PKEY_verify_init + ) + res = backend._lib.EVP_PKEY_verify( + pkey_ctx, signature, len(signature), data, len(data) + ) + # The previous call can return negative numbers in the event of an + # error. This is not a signature failure but we need to fail if it + # occurs. + backend.openssl_assert(res >= 0) + if res == 0: + backend._consume_errors() + raise InvalidSignature + + +@utils.register_interface(AsymmetricSignatureContext) +class _RSASignatureContext(object): + def __init__(self, backend, private_key, padding, algorithm): + self._backend = backend + self._private_key = private_key + + # We now call _rsa_sig_determine_padding in _rsa_sig_setup. However + # we need to make a pointless call to it here so we maintain the + # API of erroring on init with this context if the values are invalid. 
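# --- Editor's note: illustrative sketch, not part of the vendored diff. ---
# The _enc_dec_rsa and _rsa_sig_* helpers above implement the public RSA
# API. A minimal sketch of OAEP encryption and PSS signing through it:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

private_key = rsa.generate_private_key(
    public_exponent=65537, key_size=2048, backend=default_backend()
)
oaep = padding.OAEP(
    mgf=padding.MGF1(algorithm=hashes.SHA256()),
    algorithm=hashes.SHA256(),
    label=None,
)
ciphertext = private_key.public_key().encrypt(b"secret", oaep)
assert private_key.decrypt(ciphertext, oaep) == b"secret"

pss = padding.PSS(
    mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH
)
signature = private_key.sign(b"message", pss, hashes.SHA256())
# verify() raises InvalidSignature on failure, mirroring _rsa_sig_verify.
private_key.public_key().verify(signature, b"message", pss, hashes.SHA256())
# ---------------------------------------------------------------------------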
+ _rsa_sig_determine_padding(backend, private_key, padding, algorithm) + self._padding = padding + self._algorithm = algorithm + self._hash_ctx = hashes.Hash(self._algorithm, self._backend) + + def update(self, data): + self._hash_ctx.update(data) + + def finalize(self): + return _rsa_sig_sign( + self._backend, + self._padding, + self._algorithm, + self._private_key, + self._hash_ctx.finalize() + ) + + +@utils.register_interface(AsymmetricVerificationContext) +class _RSAVerificationContext(object): + def __init__(self, backend, public_key, signature, padding, algorithm): + self._backend = backend + self._public_key = public_key + self._signature = signature + self._padding = padding + # We now call _rsa_sig_determine_padding in _rsa_sig_setup. However + # we need to make a pointless call to it here so we maintain the + # API of erroring on init with this context if the values are invalid. + _rsa_sig_determine_padding(backend, public_key, padding, algorithm) + + padding = padding + self._algorithm = algorithm + self._hash_ctx = hashes.Hash(self._algorithm, self._backend) + + def update(self, data): + self._hash_ctx.update(data) + + def verify(self): + return _rsa_sig_verify( + self._backend, + self._padding, + self._algorithm, + self._public_key, + self._signature, + self._hash_ctx.finalize() + ) + + +@utils.register_interface(RSAPrivateKeyWithSerialization) +class _RSAPrivateKey(object): + def __init__(self, backend, rsa_cdata, evp_pkey): + self._backend = backend + self._rsa_cdata = rsa_cdata + self._evp_pkey = evp_pkey + + n = self._backend._ffi.new("BIGNUM **") + self._backend._lib.RSA_get0_key( + self._rsa_cdata, n, self._backend._ffi.NULL, + self._backend._ffi.NULL + ) + self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) + self._key_size = self._backend._lib.BN_num_bits(n[0]) + + key_size = utils.read_only_property("_key_size") + + def signer(self, padding, algorithm): + _warn_sign_verify_deprecated() + _check_not_prehashed(algorithm) + return _RSASignatureContext(self._backend, self, padding, algorithm) + + def decrypt(self, ciphertext, padding): + key_size_bytes = int(math.ceil(self.key_size / 8.0)) + if key_size_bytes != len(ciphertext): + raise ValueError("Ciphertext length must be equal to key size.") + + return _enc_dec_rsa(self._backend, self, ciphertext, padding) + + def public_key(self): + ctx = self._backend._lib.RSAPublicKey_dup(self._rsa_cdata) + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + ctx = self._backend._ffi.gc(ctx, self._backend._lib.RSA_free) + res = self._backend._lib.RSA_blinding_on(ctx, self._backend._ffi.NULL) + self._backend.openssl_assert(res == 1) + evp_pkey = self._backend._rsa_cdata_to_evp_pkey(ctx) + return _RSAPublicKey(self._backend, ctx, evp_pkey) + + def private_numbers(self): + n = self._backend._ffi.new("BIGNUM **") + e = self._backend._ffi.new("BIGNUM **") + d = self._backend._ffi.new("BIGNUM **") + p = self._backend._ffi.new("BIGNUM **") + q = self._backend._ffi.new("BIGNUM **") + dmp1 = self._backend._ffi.new("BIGNUM **") + dmq1 = self._backend._ffi.new("BIGNUM **") + iqmp = self._backend._ffi.new("BIGNUM **") + self._backend._lib.RSA_get0_key(self._rsa_cdata, n, e, d) + self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(e[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(d[0] != self._backend._ffi.NULL) + self._backend._lib.RSA_get0_factors(self._rsa_cdata, p, q) + self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(q[0] 
!= self._backend._ffi.NULL) + self._backend._lib.RSA_get0_crt_params( + self._rsa_cdata, dmp1, dmq1, iqmp + ) + self._backend.openssl_assert(dmp1[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(dmq1[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(iqmp[0] != self._backend._ffi.NULL) + return rsa.RSAPrivateNumbers( + p=self._backend._bn_to_int(p[0]), + q=self._backend._bn_to_int(q[0]), + d=self._backend._bn_to_int(d[0]), + dmp1=self._backend._bn_to_int(dmp1[0]), + dmq1=self._backend._bn_to_int(dmq1[0]), + iqmp=self._backend._bn_to_int(iqmp[0]), + public_numbers=rsa.RSAPublicNumbers( + e=self._backend._bn_to_int(e[0]), + n=self._backend._bn_to_int(n[0]), + ) + ) + + def private_bytes(self, encoding, format, encryption_algorithm): + return self._backend._private_key_bytes( + encoding, + format, + encryption_algorithm, + self._evp_pkey, + self._rsa_cdata + ) + + def sign(self, data, padding, algorithm): + data, algorithm = _calculate_digest_and_algorithm( + self._backend, data, algorithm + ) + return _rsa_sig_sign(self._backend, padding, algorithm, self, data) + + +@utils.register_interface(RSAPublicKeyWithSerialization) +class _RSAPublicKey(object): + def __init__(self, backend, rsa_cdata, evp_pkey): + self._backend = backend + self._rsa_cdata = rsa_cdata + self._evp_pkey = evp_pkey + + n = self._backend._ffi.new("BIGNUM **") + self._backend._lib.RSA_get0_key( + self._rsa_cdata, n, self._backend._ffi.NULL, + self._backend._ffi.NULL + ) + self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) + self._key_size = self._backend._lib.BN_num_bits(n[0]) + + key_size = utils.read_only_property("_key_size") + + def verifier(self, signature, padding, algorithm): + _warn_sign_verify_deprecated() + if not isinstance(signature, bytes): + raise TypeError("signature must be bytes.") + + _check_not_prehashed(algorithm) + return _RSAVerificationContext( + self._backend, self, signature, padding, algorithm + ) + + def encrypt(self, plaintext, padding): + return _enc_dec_rsa(self._backend, self, plaintext, padding) + + def public_numbers(self): + n = self._backend._ffi.new("BIGNUM **") + e = self._backend._ffi.new("BIGNUM **") + self._backend._lib.RSA_get0_key( + self._rsa_cdata, n, e, self._backend._ffi.NULL + ) + self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) + self._backend.openssl_assert(e[0] != self._backend._ffi.NULL) + return rsa.RSAPublicNumbers( + e=self._backend._bn_to_int(e[0]), + n=self._backend._bn_to_int(n[0]), + ) + + def public_bytes(self, encoding, format): + return self._backend._public_key_bytes( + encoding, + format, + self, + self._evp_pkey, + self._rsa_cdata + ) + + def verify(self, signature, data, padding, algorithm): + data, algorithm = _calculate_digest_and_algorithm( + self._backend, data, algorithm + ) + return _rsa_sig_verify( + self._backend, padding, algorithm, self, signature, data + ) diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/utils.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/utils.py new file mode 100644 index 0000000..05d0fe5 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/utils.py @@ -0,0 +1,45 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
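# --- Editor's note: illustrative sketch, not part of the vendored diff. ---
# private_bytes/public_bytes on the RSA key classes above delegate to the
# backend's serialization helpers. A minimal PEM-export sketch:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

private_key = rsa.generate_private_key(
    public_exponent=65537, key_size=2048, backend=default_backend()
)
pem = private_key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.NoEncryption(),
)
public_pem = private_key.public_key().public_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
# ---------------------------------------------------------------------------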
+ +from __future__ import absolute_import, division, print_function + +import warnings + +from cryptography import utils +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.asymmetric.utils import Prehashed + + +def _calculate_digest_and_algorithm(backend, data, algorithm): + if not isinstance(algorithm, Prehashed): + hash_ctx = hashes.Hash(algorithm, backend) + hash_ctx.update(data) + data = hash_ctx.finalize() + else: + algorithm = algorithm._algorithm + + if len(data) != algorithm.digest_size: + raise ValueError( + "The provided data must be the same length as the hash " + "algorithm's digest size." + ) + + return (data, algorithm) + + +def _check_not_prehashed(signature_algorithm): + if isinstance(signature_algorithm, Prehashed): + raise TypeError( + "Prehashed is only supported in the sign and verify methods. " + "It cannot be used with signer or verifier." + ) + + +def _warn_sign_verify_deprecated(): + warnings.warn( + "signer and verifier have been deprecated. Please use sign " + "and verify instead.", + utils.PersistentlyDeprecated, + stacklevel=3 + ) diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/x25519.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/x25519.py new file mode 100644 index 0000000..983ece6 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/x25519.py @@ -0,0 +1,79 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.hazmat.primitives.asymmetric.x25519 import ( + X25519PrivateKey, X25519PublicKey +) + + +@utils.register_interface(X25519PublicKey) +class _X25519PublicKey(object): + def __init__(self, backend, evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_bytes(self): + ucharpp = self._backend._ffi.new("unsigned char **") + res = self._backend._lib.EVP_PKEY_get1_tls_encodedpoint( + self._evp_pkey, ucharpp + ) + self._backend.openssl_assert(res == 32) + self._backend.openssl_assert(ucharpp[0] != self._backend._ffi.NULL) + data = self._backend._ffi.gc( + ucharpp[0], self._backend._lib.OPENSSL_free + ) + return self._backend._ffi.buffer(data, res)[:] + + +@utils.register_interface(X25519PrivateKey) +class _X25519PrivateKey(object): + def __init__(self, backend, evp_pkey): + self._backend = backend + self._evp_pkey = evp_pkey + + def public_key(self): + bio = self._backend._create_mem_bio_gc() + res = self._backend._lib.i2d_PUBKEY_bio(bio, self._evp_pkey) + self._backend.openssl_assert(res == 1) + evp_pkey = self._backend._lib.d2i_PUBKEY_bio( + bio, self._backend._ffi.NULL + ) + self._backend.openssl_assert(evp_pkey != self._backend._ffi.NULL) + evp_pkey = self._backend._ffi.gc( + evp_pkey, self._backend._lib.EVP_PKEY_free + ) + return _X25519PublicKey(self._backend, evp_pkey) + + def exchange(self, peer_public_key): + if not isinstance(peer_public_key, X25519PublicKey): + raise TypeError("peer_public_key must be X25519PublicKey.") + + ctx = self._backend._lib.EVP_PKEY_CTX_new( + self._evp_pkey, self._backend._ffi.NULL + ) + self._backend.openssl_assert(ctx != self._backend._ffi.NULL) + ctx = self._backend._ffi.gc(ctx, self._backend._lib.EVP_PKEY_CTX_free) + res = self._backend._lib.EVP_PKEY_derive_init(ctx) + 
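# --- Editor's note: illustrative sketch, not part of the vendored diff. ---
# _calculate_digest_and_algorithm above is what lets sign()/verify() accept
# a precomputed digest when it is wrapped in Prehashed. Sketch:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed

private_key = rsa.generate_private_key(
    public_exponent=65537, key_size=2048, backend=default_backend()
)
hasher = hashes.Hash(hashes.SHA256(), backend=default_backend())
hasher.update(b"large message, hashed separately")
digest = hasher.finalize()
signature = private_key.sign(
    digest, padding.PKCS1v15(), Prehashed(hashes.SHA256())
)
# ---------------------------------------------------------------------------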
self._backend.openssl_assert(res == 1) + res = self._backend._lib.EVP_PKEY_derive_set_peer( + ctx, peer_public_key._evp_pkey + ) + self._backend.openssl_assert(res == 1) + keylen = self._backend._ffi.new("size_t *") + res = self._backend._lib.EVP_PKEY_derive( + ctx, self._backend._ffi.NULL, keylen + ) + self._backend.openssl_assert(res == 1) + self._backend.openssl_assert(keylen[0] > 0) + buf = self._backend._ffi.new("unsigned char[]", keylen[0]) + res = self._backend._lib.EVP_PKEY_derive(ctx, buf, keylen) + if res != 1: + raise ValueError( + "Null shared key derived from public/private pair." + ) + + return self._backend._ffi.buffer(buf, keylen[0])[:] diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/x509.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/x509.py new file mode 100644 index 0000000..b870eeb --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/backends/openssl/x509.py @@ -0,0 +1,518 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import datetime +import operator +import warnings + +from cryptography import utils, x509 +from cryptography.exceptions import UnsupportedAlgorithm +from cryptography.hazmat.backends.openssl.decode_asn1 import ( + _CERTIFICATE_EXTENSION_PARSER, _CERTIFICATE_EXTENSION_PARSER_NO_SCT, + _CRL_EXTENSION_PARSER, _CSR_EXTENSION_PARSER, + _REVOKED_CERTIFICATE_EXTENSION_PARSER, _asn1_integer_to_int, + _asn1_string_to_bytes, _decode_x509_name, _obj2txt, _parse_asn1_time +) +from cryptography.hazmat.backends.openssl.encode_asn1 import ( + _encode_asn1_int_gc +) +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa + + +@utils.register_interface(x509.Certificate) +class _Certificate(object): + def __init__(self, backend, x509): + self._backend = backend + self._x509 = x509 + + def __repr__(self): + return "<Certificate(subject={0}, ...)>".format(self.subject) + + def __eq__(self, other): + if not isinstance(other, x509.Certificate): + return NotImplemented + + res = self._backend._lib.X509_cmp(self._x509, other._x509) + return res == 0 + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.public_bytes(serialization.Encoding.DER)) + + def fingerprint(self, algorithm): + h = hashes.Hash(algorithm, self._backend) + h.update(self.public_bytes(serialization.Encoding.DER)) + return h.finalize() + + @property + def version(self): + version = self._backend._lib.X509_get_version(self._x509) + if version == 0: + return x509.Version.v1 + elif version == 2: + return x509.Version.v3 + else: + raise x509.InvalidVersion( + "{0} is not a valid X509 version".format(version), version + ) + + @property + def serial(self): + warnings.warn( + "Certificate serial is deprecated, use serial_number instead.", + utils.PersistentlyDeprecated, + stacklevel=2 + ) + return self.serial_number + + @property + def serial_number(self): + asn1_int = self._backend._lib.X509_get_serialNumber(self._x509) + self._backend.openssl_assert(asn1_int != self._backend._ffi.NULL) + return _asn1_integer_to_int(self._backend, asn1_int) + + def public_key(self): + pkey = self._backend._lib.X509_get_pubkey(self._x509) + if pkey == self._backend._ffi.NULL: + # Remove errors from the stack.
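# --- Editor's note: illustrative sketch, not part of the vendored diff. ---
# _X25519PrivateKey.exchange above performs the EVP_PKEY_derive sequence;
# the public API it implements reduces to:
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey

alice = X25519PrivateKey.generate()
bob = X25519PrivateKey.generate()
# Both sides derive the same 32-byte shared secret.
assert alice.exchange(bob.public_key()) == bob.exchange(alice.public_key())
# ---------------------------------------------------------------------------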
+ self._backend._consume_errors() + raise ValueError("Certificate public key is of an unknown type") + + pkey = self._backend._ffi.gc(pkey, self._backend._lib.EVP_PKEY_free) + + return self._backend._evp_pkey_to_public_key(pkey) + + @property + def not_valid_before(self): + asn1_time = self._backend._lib.X509_get_notBefore(self._x509) + return _parse_asn1_time(self._backend, asn1_time) + + @property + def not_valid_after(self): + asn1_time = self._backend._lib.X509_get_notAfter(self._x509) + return _parse_asn1_time(self._backend, asn1_time) + + @property + def issuer(self): + issuer = self._backend._lib.X509_get_issuer_name(self._x509) + self._backend.openssl_assert(issuer != self._backend._ffi.NULL) + return _decode_x509_name(self._backend, issuer) + + @property + def subject(self): + subject = self._backend._lib.X509_get_subject_name(self._x509) + self._backend.openssl_assert(subject != self._backend._ffi.NULL) + return _decode_x509_name(self._backend, subject) + + @property + def signature_hash_algorithm(self): + oid = self.signature_algorithm_oid + try: + return x509._SIG_OIDS_TO_HASH[oid] + except KeyError: + raise UnsupportedAlgorithm( + "Signature algorithm OID:{0} not recognized".format(oid) + ) + + @property + def signature_algorithm_oid(self): + alg = self._backend._ffi.new("X509_ALGOR **") + self._backend._lib.X509_get0_signature( + self._backend._ffi.NULL, alg, self._x509 + ) + self._backend.openssl_assert(alg[0] != self._backend._ffi.NULL) + oid = _obj2txt(self._backend, alg[0].algorithm) + return x509.ObjectIdentifier(oid) + + @utils.cached_property + def extensions(self): + if self._backend._lib.CRYPTOGRAPHY_OPENSSL_110_OR_GREATER: + return _CERTIFICATE_EXTENSION_PARSER.parse( + self._backend, self._x509 + ) + else: + return _CERTIFICATE_EXTENSION_PARSER_NO_SCT.parse( + self._backend, self._x509 + ) + + @property + def signature(self): + sig = self._backend._ffi.new("ASN1_BIT_STRING **") + self._backend._lib.X509_get0_signature( + sig, self._backend._ffi.NULL, self._x509 + ) + self._backend.openssl_assert(sig[0] != self._backend._ffi.NULL) + return _asn1_string_to_bytes(self._backend, sig[0]) + + @property + def tbs_certificate_bytes(self): + pp = self._backend._ffi.new("unsigned char **") + res = self._backend._lib.i2d_re_X509_tbs(self._x509, pp) + self._backend.openssl_assert(res > 0) + pp = self._backend._ffi.gc( + pp, lambda pointer: self._backend._lib.OPENSSL_free(pointer[0]) + ) + return self._backend._ffi.buffer(pp[0], res)[:] + + def public_bytes(self, encoding): + bio = self._backend._create_mem_bio_gc() + if encoding is serialization.Encoding.PEM: + res = self._backend._lib.PEM_write_bio_X509(bio, self._x509) + elif encoding is serialization.Encoding.DER: + res = self._backend._lib.i2d_X509_bio(bio, self._x509) + else: + raise TypeError("encoding must be an item from the Encoding enum") + + self._backend.openssl_assert(res == 1) + return self._backend._read_mem_bio(bio) + + +@utils.register_interface(x509.RevokedCertificate) +class _RevokedCertificate(object): + def __init__(self, backend, crl, x509_revoked): + self._backend = backend + # The X509_REVOKED_value is a X509_REVOKED * that has + # no reference counting. This means when X509_CRL_free is + # called then the CRL and all X509_REVOKED * are freed. Since + # you can retain a reference to a single revoked certificate + # and let the CRL fall out of scope we need to retain a + # private reference to the CRL inside the RevokedCertificate + # object to prevent the gc from being called inappropriately. 
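# --- Editor's note: illustrative sketch, not part of the vendored diff. ---
# _Certificate above is what the public x509 loaders return. Sketch, where
# pem_data is a hypothetical variable holding a PEM-encoded certificate:
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes

cert = x509.load_pem_x509_certificate(pem_data, default_backend())
print(cert.subject, cert.not_valid_after)
print(cert.fingerprint(hashes.SHA256()).hex())
# ---------------------------------------------------------------------------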
+ self._crl = crl + self._x509_revoked = x509_revoked + + @property + def serial_number(self): + asn1_int = self._backend._lib.X509_REVOKED_get0_serialNumber( + self._x509_revoked + ) + self._backend.openssl_assert(asn1_int != self._backend._ffi.NULL) + return _asn1_integer_to_int(self._backend, asn1_int) + + @property + def revocation_date(self): + return _parse_asn1_time( + self._backend, + self._backend._lib.X509_REVOKED_get0_revocationDate( + self._x509_revoked + ) + ) + + @utils.cached_property + def extensions(self): + return _REVOKED_CERTIFICATE_EXTENSION_PARSER.parse( + self._backend, self._x509_revoked + ) + + +@utils.register_interface(x509.CertificateRevocationList) +class _CertificateRevocationList(object): + def __init__(self, backend, x509_crl): + self._backend = backend + self._x509_crl = x509_crl + + def __eq__(self, other): + if not isinstance(other, x509.CertificateRevocationList): + return NotImplemented + + res = self._backend._lib.X509_CRL_cmp(self._x509_crl, other._x509_crl) + return res == 0 + + def __ne__(self, other): + return not self == other + + def fingerprint(self, algorithm): + h = hashes.Hash(algorithm, self._backend) + bio = self._backend._create_mem_bio_gc() + res = self._backend._lib.i2d_X509_CRL_bio( + bio, self._x509_crl + ) + self._backend.openssl_assert(res == 1) + der = self._backend._read_mem_bio(bio) + h.update(der) + return h.finalize() + + def get_revoked_certificate_by_serial_number(self, serial_number): + revoked = self._backend._ffi.new("X509_REVOKED **") + asn1_int = _encode_asn1_int_gc(self._backend, serial_number) + res = self._backend._lib.X509_CRL_get0_by_serial( + self._x509_crl, revoked, asn1_int + ) + if res == 0: + return None + else: + self._backend.openssl_assert( + revoked[0] != self._backend._ffi.NULL + ) + return _RevokedCertificate( + self._backend, self._x509_crl, revoked[0] + ) + + @property + def signature_hash_algorithm(self): + oid = self.signature_algorithm_oid + try: + return x509._SIG_OIDS_TO_HASH[oid] + except KeyError: + raise UnsupportedAlgorithm( + "Signature algorithm OID:{0} not recognized".format(oid) + ) + + @property + def signature_algorithm_oid(self): + alg = self._backend._ffi.new("X509_ALGOR **") + self._backend._lib.X509_CRL_get0_signature( + self._x509_crl, self._backend._ffi.NULL, alg + ) + self._backend.openssl_assert(alg[0] != self._backend._ffi.NULL) + oid = _obj2txt(self._backend, alg[0].algorithm) + return x509.ObjectIdentifier(oid) + + @property + def issuer(self): + issuer = self._backend._lib.X509_CRL_get_issuer(self._x509_crl) + self._backend.openssl_assert(issuer != self._backend._ffi.NULL) + return _decode_x509_name(self._backend, issuer) + + @property + def next_update(self): + nu = self._backend._lib.X509_CRL_get_nextUpdate(self._x509_crl) + self._backend.openssl_assert(nu != self._backend._ffi.NULL) + return _parse_asn1_time(self._backend, nu) + + @property + def last_update(self): + lu = self._backend._lib.X509_CRL_get_lastUpdate(self._x509_crl) + self._backend.openssl_assert(lu != self._backend._ffi.NULL) + return _parse_asn1_time(self._backend, lu) + + @property + def signature(self): + sig = self._backend._ffi.new("ASN1_BIT_STRING **") + self._backend._lib.X509_CRL_get0_signature( + self._x509_crl, sig, self._backend._ffi.NULL + ) + self._backend.openssl_assert(sig[0] != self._backend._ffi.NULL) + return _asn1_string_to_bytes(self._backend, sig[0]) + + @property + def tbs_certlist_bytes(self): + pp = self._backend._ffi.new("unsigned char **") + res = 
self._backend._lib.i2d_re_X509_CRL_tbs(self._x509_crl, pp) + self._backend.openssl_assert(res > 0) + pp = self._backend._ffi.gc( + pp, lambda pointer: self._backend._lib.OPENSSL_free(pointer[0]) + ) + return self._backend._ffi.buffer(pp[0], res)[:] + + def public_bytes(self, encoding): + bio = self._backend._create_mem_bio_gc() + if encoding is serialization.Encoding.PEM: + res = self._backend._lib.PEM_write_bio_X509_CRL( + bio, self._x509_crl + ) + elif encoding is serialization.Encoding.DER: + res = self._backend._lib.i2d_X509_CRL_bio(bio, self._x509_crl) + else: + raise TypeError("encoding must be an item from the Encoding enum") + + self._backend.openssl_assert(res == 1) + return self._backend._read_mem_bio(bio) + + def _revoked_cert(self, idx): + revoked = self._backend._lib.X509_CRL_get_REVOKED(self._x509_crl) + r = self._backend._lib.sk_X509_REVOKED_value(revoked, idx) + self._backend.openssl_assert(r != self._backend._ffi.NULL) + return _RevokedCertificate(self._backend, self, r) + + def __iter__(self): + for i in range(len(self)): + yield self._revoked_cert(i) + + def __getitem__(self, idx): + if isinstance(idx, slice): + start, stop, step = idx.indices(len(self)) + return [self._revoked_cert(i) for i in range(start, stop, step)] + else: + idx = operator.index(idx) + if idx < 0: + idx += len(self) + if not 0 <= idx < len(self): + raise IndexError + return self._revoked_cert(idx) + + def __len__(self): + revoked = self._backend._lib.X509_CRL_get_REVOKED(self._x509_crl) + if revoked == self._backend._ffi.NULL: + return 0 + else: + return self._backend._lib.sk_X509_REVOKED_num(revoked) + + @utils.cached_property + def extensions(self): + return _CRL_EXTENSION_PARSER.parse(self._backend, self._x509_crl) + + def is_signature_valid(self, public_key): + if not isinstance(public_key, (dsa.DSAPublicKey, rsa.RSAPublicKey, + ec.EllipticCurvePublicKey)): + raise TypeError('Expecting one of DSAPublicKey, RSAPublicKey,' + ' or EllipticCurvePublicKey.') + res = self._backend._lib.X509_CRL_verify( + self._x509_crl, public_key._evp_pkey + ) + + if res != 1: + self._backend._consume_errors() + return False + + return True + + +@utils.register_interface(x509.CertificateSigningRequest) +class _CertificateSigningRequest(object): + def __init__(self, backend, x509_req): + self._backend = backend + self._x509_req = x509_req + + def __eq__(self, other): + if not isinstance(other, _CertificateSigningRequest): + return NotImplemented + + self_bytes = self.public_bytes(serialization.Encoding.DER) + other_bytes = other.public_bytes(serialization.Encoding.DER) + return self_bytes == other_bytes + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.public_bytes(serialization.Encoding.DER)) + + def public_key(self): + pkey = self._backend._lib.X509_REQ_get_pubkey(self._x509_req) + self._backend.openssl_assert(pkey != self._backend._ffi.NULL) + pkey = self._backend._ffi.gc(pkey, self._backend._lib.EVP_PKEY_free) + return self._backend._evp_pkey_to_public_key(pkey) + + @property + def subject(self): + subject = self._backend._lib.X509_REQ_get_subject_name(self._x509_req) + self._backend.openssl_assert(subject != self._backend._ffi.NULL) + return _decode_x509_name(self._backend, subject) + + @property + def signature_hash_algorithm(self): + oid = self.signature_algorithm_oid + try: + return x509._SIG_OIDS_TO_HASH[oid] + except KeyError: + raise UnsupportedAlgorithm( + "Signature algorithm OID:{0} not recognized".format(oid) + ) + + @property + def 
signature_algorithm_oid(self): + alg = self._backend._ffi.new("X509_ALGOR **") + self._backend._lib.X509_REQ_get0_signature( + self._x509_req, self._backend._ffi.NULL, alg + ) + self._backend.openssl_assert(alg[0] != self._backend._ffi.NULL) + oid = _obj2txt(self._backend, alg[0].algorithm) + return x509.ObjectIdentifier(oid) + + @utils.cached_property + def extensions(self): + x509_exts = self._backend._lib.X509_REQ_get_extensions(self._x509_req) + return _CSR_EXTENSION_PARSER.parse(self._backend, x509_exts) + + def public_bytes(self, encoding): + bio = self._backend._create_mem_bio_gc() + if encoding is serialization.Encoding.PEM: + res = self._backend._lib.PEM_write_bio_X509_REQ( + bio, self._x509_req + ) + elif encoding is serialization.Encoding.DER: + res = self._backend._lib.i2d_X509_REQ_bio(bio, self._x509_req) + else: + raise TypeError("encoding must be an item from the Encoding enum") + + self._backend.openssl_assert(res == 1) + return self._backend._read_mem_bio(bio) + + @property + def tbs_certrequest_bytes(self): + pp = self._backend._ffi.new("unsigned char **") + res = self._backend._lib.i2d_re_X509_REQ_tbs(self._x509_req, pp) + self._backend.openssl_assert(res > 0) + pp = self._backend._ffi.gc( + pp, lambda pointer: self._backend._lib.OPENSSL_free(pointer[0]) + ) + return self._backend._ffi.buffer(pp[0], res)[:] + + @property + def signature(self): + sig = self._backend._ffi.new("ASN1_BIT_STRING **") + self._backend._lib.X509_REQ_get0_signature( + self._x509_req, sig, self._backend._ffi.NULL + ) + self._backend.openssl_assert(sig[0] != self._backend._ffi.NULL) + return _asn1_string_to_bytes(self._backend, sig[0]) + + @property + def is_signature_valid(self): + pkey = self._backend._lib.X509_REQ_get_pubkey(self._x509_req) + self._backend.openssl_assert(pkey != self._backend._ffi.NULL) + pkey = self._backend._ffi.gc(pkey, self._backend._lib.EVP_PKEY_free) + res = self._backend._lib.X509_REQ_verify(self._x509_req, pkey) + + if res != 1: + self._backend._consume_errors() + return False + + return True + + +@utils.register_interface( + x509.certificate_transparency.SignedCertificateTimestamp +) +class _SignedCertificateTimestamp(object): + def __init__(self, backend, sct_list, sct): + self._backend = backend + # Keep the SCT_LIST that this SCT came from alive. + self._sct_list = sct_list + self._sct = sct + + @property + def version(self): + version = self._backend._lib.SCT_get_version(self._sct) + assert version == self._backend._lib.SCT_VERSION_V1 + return x509.certificate_transparency.Version.v1 + + @property + def log_id(self): + out = self._backend._ffi.new("unsigned char **") + log_id_length = self._backend._lib.SCT_get0_log_id(self._sct, out) + assert log_id_length >= 0 + return self._backend._ffi.buffer(out[0], log_id_length)[:] + + @property + def timestamp(self): + timestamp = self._backend._lib.SCT_get_timestamp(self._sct) + milliseconds = timestamp % 1000 + return datetime.datetime.utcfromtimestamp( + timestamp // 1000 + ).replace(microsecond=milliseconds * 1000) + + @property + def entry_type(self): + entry_type = self._backend._lib.SCT_get_log_entry_type(self._sct) + # We currently only support loading SCTs from the X.509 extension, so + # we only have precerts. 
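# --- Editor's note: illustrative sketch, not part of the vendored diff. ---
# _SignedCertificateTimestamp objects above surface through the certificate
# extension parser. Sketch, reusing the hypothetical cert from the earlier
# certificate example (get_extension_for_oid raises ExtensionNotFound if
# the certificate carries no SCTs):
from cryptography.x509.oid import ExtensionOID

scts = cert.extensions.get_extension_for_oid(
    ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS
).value
for sct in scts:
    print(sct.log_id.hex(), sct.timestamp, sct.entry_type)
# ---------------------------------------------------------------------------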
+ assert entry_type == self._backend._lib.CT_LOG_ENTRY_TYPE_PRECERT + return x509.certificate_transparency.LogEntryType.PRE_CERTIFICATE diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/__init__.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/__init__.py new file mode 100644 index 0000000..4b54088 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/__init__.py @@ -0,0 +1,5 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/_constant_time.cp37-win32.pyd b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/_constant_time.cp37-win32.pyd new file mode 100644 index 0000000..7e80078 Binary files /dev/null and b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/_constant_time.cp37-win32.pyd differ diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/_openssl.cp37-win32.pyd b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/_openssl.cp37-win32.pyd new file mode 100644 index 0000000..06ec31e Binary files /dev/null and b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/_openssl.cp37-win32.pyd differ diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/_padding.cp37-win32.pyd b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/_padding.cp37-win32.pyd new file mode 100644 index 0000000..72ae813 Binary files /dev/null and b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/_padding.cp37-win32.pyd differ diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/openssl/__init__.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/openssl/__init__.py new file mode 100644 index 0000000..4b54088 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/openssl/__init__.py @@ -0,0 +1,5 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/openssl/_conditional.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/openssl/_conditional.py new file mode 100644 index 0000000..b3e4e8b --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/openssl/_conditional.py @@ -0,0 +1,302 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +from __future__ import absolute_import, division, print_function + + +def cryptography_has_ec2m(): + return [ + "EC_POINT_set_affine_coordinates_GF2m", + "EC_POINT_get_affine_coordinates_GF2m", + "EC_POINT_set_compressed_coordinates_GF2m", + ] + + +def cryptography_has_ec_1_0_2(): + return [ + "EC_curve_nid2nist", + ] + + +def cryptography_has_set_ecdh_auto(): + return [ + "SSL_CTX_set_ecdh_auto", + ] + + +def cryptography_has_rsa_r_pkcs_decoding_error(): + return [ + "RSA_R_PKCS_DECODING_ERROR" + ] + + +def cryptography_has_rsa_oaep_md(): + return [ + "EVP_PKEY_CTX_set_rsa_oaep_md", + ] + + +def cryptography_has_rsa_oaep_label(): + return [ + "EVP_PKEY_CTX_set0_rsa_oaep_label", + ] + + +def cryptography_has_ssl3_method(): + return [ + "SSLv3_method", + "SSLv3_client_method", + "SSLv3_server_method", + ] + + +def cryptography_has_alpn(): + return [ + "SSL_CTX_set_alpn_protos", + "SSL_set_alpn_protos", + "SSL_CTX_set_alpn_select_cb", + "SSL_get0_alpn_selected", + ] + + +def cryptography_has_compression(): + return [ + "SSL_get_current_compression", + "SSL_get_current_expansion", + "SSL_COMP_get_name", + ] + + +def cryptography_has_get_server_tmp_key(): + return [ + "SSL_get_server_tmp_key", + ] + + +def cryptography_has_102_verification_error_codes(): + return [ + 'X509_V_ERR_SUITE_B_INVALID_VERSION', + 'X509_V_ERR_SUITE_B_INVALID_ALGORITHM', + 'X509_V_ERR_SUITE_B_INVALID_CURVE', + 'X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM', + 'X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED', + 'X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256', + 'X509_V_ERR_HOSTNAME_MISMATCH', + 'X509_V_ERR_EMAIL_MISMATCH', + 'X509_V_ERR_IP_ADDRESS_MISMATCH' + ] + + +def cryptography_has_102_verification_params(): + return [ + "X509_V_FLAG_SUITEB_128_LOS_ONLY", + "X509_V_FLAG_SUITEB_192_LOS", + "X509_V_FLAG_SUITEB_128_LOS", + "X509_VERIFY_PARAM_set1_host", + "X509_VERIFY_PARAM_set1_email", + "X509_VERIFY_PARAM_set1_ip", + "X509_VERIFY_PARAM_set1_ip_asc", + "X509_VERIFY_PARAM_set_hostflags", + ] + + +def cryptography_has_x509_v_flag_trusted_first(): + return [ + "X509_V_FLAG_TRUSTED_FIRST", + ] + + +def cryptography_has_x509_v_flag_partial_chain(): + return [ + "X509_V_FLAG_PARTIAL_CHAIN", + ] + + +def cryptography_has_set_cert_cb(): + return [ + "SSL_CTX_set_cert_cb", + "SSL_set_cert_cb", + ] + + +def cryptography_has_ssl_st(): + return [ + "SSL_ST_BEFORE", + "SSL_ST_OK", + "SSL_ST_INIT", + "SSL_ST_RENEGOTIATE", + ] + + +def cryptography_has_tls_st(): + return [ + "TLS_ST_BEFORE", + "TLS_ST_OK", + ] + + +def cryptography_has_locking_callbacks(): + return [ + "CRYPTO_LOCK", + "CRYPTO_UNLOCK", + "CRYPTO_READ", + "CRYPTO_LOCK_SSL", + "CRYPTO_lock", + ] + + +def cryptography_has_scrypt(): + return [ + "EVP_PBE_scrypt", + ] + + +def cryptography_has_generic_dtls_method(): + return [ + "DTLS_method", + "DTLS_server_method", + "DTLS_client_method", + "SSL_OP_NO_DTLSv1", + "SSL_OP_NO_DTLSv1_2", + "DTLS_set_link_mtu", + "DTLS_get_link_min_mtu", + ] + + +def cryptography_has_evp_pkey_dhx(): + return [ + "EVP_PKEY_DHX", + ] + + +def cryptography_has_mem_functions(): + return [ + "Cryptography_CRYPTO_set_mem_functions", + ] + + +def cryptography_has_sct(): + return [ + "SCT_get_version", + "SCT_get_log_entry_type", + "SCT_get0_log_id", + "SCT_get_timestamp", + "SCT_set_source", + "sk_SCT_num", + "sk_SCT_value", + "SCT_LIST_free", + ] + + +def cryptography_has_x509_store_ctx_get_issuer(): + return [ + "X509_STORE_get_get_issuer", + "X509_STORE_set_get_issuer", + ] + + +def cryptography_has_x25519(): + return [ + "EVP_PKEY_X25519", + 
"NID_X25519", + ] + + +def cryptography_has_evp_pkey_get_set_tls_encodedpoint(): + return [ + "EVP_PKEY_get1_tls_encodedpoint", + "EVP_PKEY_set1_tls_encodedpoint", + ] + + +def cryptography_has_fips(): + return [ + "FIPS_set_mode", + "FIPS_mode", + ] + + +def cryptography_has_ssl_sigalgs(): + return [ + "SSL_CTX_set1_sigalgs_list", + "SSL_get_sigalgs", + ] + + +def cryptography_has_psk(): + return [ + "SSL_CTX_use_psk_identity_hint", + "SSL_CTX_set_psk_server_callback", + "SSL_CTX_set_psk_client_callback", + ] + + +def cryptography_has_custom_ext(): + return [ + "SSL_CTX_add_client_custom_ext", + "SSL_CTX_add_server_custom_ext", + "SSL_extension_supported", + ] + + +def cryptography_has_openssl_cleanup(): + return [ + "OPENSSL_cleanup", + ] + + +# This is a mapping of +# {condition: function-returning-names-dependent-on-that-condition} so we can +# loop over them and delete unsupported names at runtime. It will be removed +# when cffi supports #if in cdef. We use functions instead of just a dict of +# lists so we can use coverage to measure which are used. +CONDITIONAL_NAMES = { + "Cryptography_HAS_EC2M": cryptography_has_ec2m, + "Cryptography_HAS_EC_1_0_2": cryptography_has_ec_1_0_2, + "Cryptography_HAS_SET_ECDH_AUTO": cryptography_has_set_ecdh_auto, + "Cryptography_HAS_RSA_R_PKCS_DECODING_ERROR": ( + cryptography_has_rsa_r_pkcs_decoding_error + ), + "Cryptography_HAS_RSA_OAEP_MD": cryptography_has_rsa_oaep_md, + "Cryptography_HAS_RSA_OAEP_LABEL": cryptography_has_rsa_oaep_label, + "Cryptography_HAS_SSL3_METHOD": cryptography_has_ssl3_method, + "Cryptography_HAS_ALPN": cryptography_has_alpn, + "Cryptography_HAS_COMPRESSION": cryptography_has_compression, + "Cryptography_HAS_GET_SERVER_TMP_KEY": cryptography_has_get_server_tmp_key, + "Cryptography_HAS_102_VERIFICATION_ERROR_CODES": ( + cryptography_has_102_verification_error_codes + ), + "Cryptography_HAS_102_VERIFICATION_PARAMS": ( + cryptography_has_102_verification_params + ), + "Cryptography_HAS_X509_V_FLAG_TRUSTED_FIRST": ( + cryptography_has_x509_v_flag_trusted_first + ), + "Cryptography_HAS_X509_V_FLAG_PARTIAL_CHAIN": ( + cryptography_has_x509_v_flag_partial_chain + ), + "Cryptography_HAS_SET_CERT_CB": cryptography_has_set_cert_cb, + "Cryptography_HAS_SSL_ST": cryptography_has_ssl_st, + "Cryptography_HAS_TLS_ST": cryptography_has_tls_st, + "Cryptography_HAS_LOCKING_CALLBACKS": cryptography_has_locking_callbacks, + "Cryptography_HAS_SCRYPT": cryptography_has_scrypt, + "Cryptography_HAS_GENERIC_DTLS_METHOD": ( + cryptography_has_generic_dtls_method + ), + "Cryptography_HAS_EVP_PKEY_DHX": cryptography_has_evp_pkey_dhx, + "Cryptography_HAS_MEM_FUNCTIONS": cryptography_has_mem_functions, + "Cryptography_HAS_SCT": cryptography_has_sct, + "Cryptography_HAS_X509_STORE_CTX_GET_ISSUER": ( + cryptography_has_x509_store_ctx_get_issuer + ), + "Cryptography_HAS_X25519": cryptography_has_x25519, + "Cryptography_HAS_EVP_PKEY_get_set_tls_encodedpoint": ( + cryptography_has_evp_pkey_get_set_tls_encodedpoint + ), + "Cryptography_HAS_FIPS": cryptography_has_fips, + "Cryptography_HAS_SIGALGS": cryptography_has_ssl_sigalgs, + "Cryptography_HAS_PSK": cryptography_has_psk, + "Cryptography_HAS_CUSTOM_EXT": cryptography_has_custom_ext, + "Cryptography_HAS_OPENSSL_CLEANUP": cryptography_has_openssl_cleanup, +} diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/openssl/binding.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/openssl/binding.py new file mode 100644 index 0000000..81cf547 --- /dev/null 
+++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/bindings/openssl/binding.py @@ -0,0 +1,157 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import collections +import threading +import types + +from cryptography import utils +from cryptography.exceptions import InternalError +from cryptography.hazmat.bindings._openssl import ffi, lib +from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES + +_OpenSSLErrorWithText = collections.namedtuple( + "_OpenSSLErrorWithText", ["code", "lib", "func", "reason", "reason_text"] +) + + +class _OpenSSLError(object): + def __init__(self, code, lib, func, reason): + self._code = code + self._lib = lib + self._func = func + self._reason = reason + + def _lib_reason_match(self, lib, reason): + return lib == self.lib and reason == self.reason + + code = utils.read_only_property("_code") + lib = utils.read_only_property("_lib") + func = utils.read_only_property("_func") + reason = utils.read_only_property("_reason") + + +def _consume_errors(lib): + errors = [] + while True: + code = lib.ERR_get_error() + if code == 0: + break + + err_lib = lib.ERR_GET_LIB(code) + err_func = lib.ERR_GET_FUNC(code) + err_reason = lib.ERR_GET_REASON(code) + + errors.append(_OpenSSLError(code, err_lib, err_func, err_reason)) + + return errors + + +def _openssl_assert(lib, ok): + if not ok: + errors = _consume_errors(lib) + errors_with_text = [] + for err in errors: + buf = ffi.new("char[]", 256) + lib.ERR_error_string_n(err.code, buf, len(buf)) + err_text_reason = ffi.string(buf) + + errors_with_text.append( + _OpenSSLErrorWithText( + err.code, err.lib, err.func, err.reason, err_text_reason + ) + ) + + raise InternalError( + "Unknown OpenSSL error. This error is commonly encountered when " + "another library is not cleaning up the OpenSSL error stack. If " + "you are using cryptography with another library that uses " + "OpenSSL try disabling it before reporting a bug. Otherwise " + "please file an issue at https://github.com/pyca/cryptography/" + "issues with information on how to reproduce " + "this. ({0!r})".format(errors_with_text), + errors_with_text + ) + + +def build_conditional_library(lib, conditional_names): + conditional_lib = types.ModuleType("lib") + conditional_lib._original_lib = lib + excluded_names = set() + for condition, names_cb in conditional_names.items(): + if not getattr(lib, condition): + excluded_names.update(names_cb()) + + for attr in dir(lib): + if attr not in excluded_names: + setattr(conditional_lib, attr, getattr(lib, attr)) + + return conditional_lib + + +class Binding(object): + """ + OpenSSL API wrapper. + """ + lib = None + ffi = ffi + _lib_loaded = False + _init_lock = threading.Lock() + _lock_init_lock = threading.Lock() + + def __init__(self): + self._ensure_ffi_initialized() + + @classmethod + def _register_osrandom_engine(cls): + # Clear any errors extant in the queue before we start. In many + # scenarios other things may be interacting with OpenSSL in the same + # process space and it has proven untenable to assume that they will + # reliably clear the error queue. Once we clear it here we will + # error on any subsequent unexpected item in the stack. 
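# --- Editor's note: illustrative sketch, not part of the vendored diff. ---
# build_conditional_library above strips every name whose Cryptography_HAS_*
# flag is false, so feature detection reduces to a truthiness test on lib:
from cryptography.hazmat.bindings.openssl.binding import Binding

binding = Binding()
if binding.lib.Cryptography_HAS_X25519:
    print("this OpenSSL build exposes X25519")
else:
    print("X25519 symbols were removed by build_conditional_library")
# ---------------------------------------------------------------------------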
+ cls.lib.ERR_clear_error() + cls._osrandom_engine_id = cls.lib.Cryptography_osrandom_engine_id + cls._osrandom_engine_name = cls.lib.Cryptography_osrandom_engine_name + result = cls.lib.Cryptography_add_osrandom_engine() + _openssl_assert(cls.lib, result in (1, 2)) + + @classmethod + def _ensure_ffi_initialized(cls): + with cls._init_lock: + if not cls._lib_loaded: + cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES) + cls._lib_loaded = True + # initialize the SSL library + cls.lib.SSL_library_init() + # adds all ciphers/digests for EVP + cls.lib.OpenSSL_add_all_algorithms() + # loads error strings for libcrypto and libssl functions + cls.lib.SSL_load_error_strings() + cls._register_osrandom_engine() + + @classmethod + def init_static_locks(cls): + with cls._lock_init_lock: + cls._ensure_ffi_initialized() + # Use Python's implementation if available, importing _ssl triggers + # the setup for this. + __import__("_ssl") + + if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL: + return + + # If nothing else has setup a locking callback already, we set up + # our own + res = lib.Cryptography_setup_ssl_threads() + _openssl_assert(cls.lib, res == 1) + + +# OpenSSL is not thread safe until the locks are initialized. We call this +# method in module scope so that it executes with the import lock. On +# Pythons < 3.4 this import lock is a global lock, which can prevent a race +# condition registering the OpenSSL locks. On Python 3.4+ the import lock +# is per module so this approach will not work. +Binding.init_static_locks() diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/__init__.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/__init__.py new file mode 100644 index 0000000..4b54088 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/__init__.py @@ -0,0 +1,5 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/__init__.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/__init__.py new file mode 100644 index 0000000..494a7a1 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/__init__.py @@ -0,0 +1,40 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class AsymmetricSignatureContext(object): + @abc.abstractmethod + def update(self, data): + """ + Processes the provided bytes and returns nothing. + """ + + @abc.abstractmethod + def finalize(self): + """ + Returns the signature as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class AsymmetricVerificationContext(object): + @abc.abstractmethod + def update(self, data): + """ + Processes the provided bytes and returns nothing. + """ + + @abc.abstractmethod + def verify(self): + """ + Raises an exception if the bytes provided to update do not match the + signature or the signature does not match the public key. 
+ """ diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/dh.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/dh.py new file mode 100644 index 0000000..4fc9952 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/dh.py @@ -0,0 +1,212 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils + + +def generate_parameters(generator, key_size, backend): + return backend.generate_dh_parameters(generator, key_size) + + +class DHPrivateNumbers(object): + def __init__(self, x, public_numbers): + if not isinstance(x, six.integer_types): + raise TypeError("x must be an integer.") + + if not isinstance(public_numbers, DHPublicNumbers): + raise TypeError("public_numbers must be an instance of " + "DHPublicNumbers.") + + self._x = x + self._public_numbers = public_numbers + + def __eq__(self, other): + if not isinstance(other, DHPrivateNumbers): + return NotImplemented + + return ( + self._x == other._x and + self._public_numbers == other._public_numbers + ) + + def __ne__(self, other): + return not self == other + + def private_key(self, backend): + return backend.load_dh_private_numbers(self) + + public_numbers = utils.read_only_property("_public_numbers") + x = utils.read_only_property("_x") + + +class DHPublicNumbers(object): + def __init__(self, y, parameter_numbers): + if not isinstance(y, six.integer_types): + raise TypeError("y must be an integer.") + + if not isinstance(parameter_numbers, DHParameterNumbers): + raise TypeError( + "parameters must be an instance of DHParameterNumbers.") + + self._y = y + self._parameter_numbers = parameter_numbers + + def __eq__(self, other): + if not isinstance(other, DHPublicNumbers): + return NotImplemented + + return ( + self._y == other._y and + self._parameter_numbers == other._parameter_numbers + ) + + def __ne__(self, other): + return not self == other + + def public_key(self, backend): + return backend.load_dh_public_numbers(self) + + y = utils.read_only_property("_y") + parameter_numbers = utils.read_only_property("_parameter_numbers") + + +class DHParameterNumbers(object): + def __init__(self, p, g, q=None): + if ( + not isinstance(p, six.integer_types) or + not isinstance(g, six.integer_types) + ): + raise TypeError("p and g must be integers") + if q is not None and not isinstance(q, six.integer_types): + raise TypeError("q must be integer or None") + + if g < 2: + raise ValueError("DH generator must be 2 or greater") + + self._p = p + self._g = g + self._q = q + + def __eq__(self, other): + if not isinstance(other, DHParameterNumbers): + return NotImplemented + + return ( + self._p == other._p and + self._g == other._g and + self._q == other._q + ) + + def __ne__(self, other): + return not self == other + + def parameters(self, backend): + return backend.load_dh_parameter_numbers(self) + + p = utils.read_only_property("_p") + g = utils.read_only_property("_g") + q = utils.read_only_property("_q") + + +@six.add_metaclass(abc.ABCMeta) +class DHParameters(object): + @abc.abstractmethod + def generate_private_key(self): + """ + Generates and returns a DHPrivateKey. 
+ """ + + @abc.abstractmethod + def parameter_bytes(self, encoding, format): + """ + Returns the parameters serialized as bytes. + """ + + @abc.abstractmethod + def parameter_numbers(self): + """ + Returns a DHParameterNumbers. + """ + + +DHParametersWithSerialization = DHParameters + + +@six.add_metaclass(abc.ABCMeta) +class DHPrivateKey(object): + @abc.abstractproperty + def key_size(self): + """ + The bit length of the prime modulus. + """ + + @abc.abstractmethod + def public_key(self): + """ + The DHPublicKey associated with this private key. + """ + + @abc.abstractmethod + def parameters(self): + """ + The DHParameters object associated with this private key. + """ + + @abc.abstractmethod + def exchange(self, peer_public_key): + """ + Given peer's DHPublicKey, carry out the key exchange and + return shared key as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DHPrivateKeyWithSerialization(DHPrivateKey): + @abc.abstractmethod + def private_numbers(self): + """ + Returns a DHPrivateNumbers. + """ + + @abc.abstractmethod + def private_bytes(self, encoding, format, encryption_algorithm): + """ + Returns the key serialized as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DHPublicKey(object): + @abc.abstractproperty + def key_size(self): + """ + The bit length of the prime modulus. + """ + + @abc.abstractmethod + def parameters(self): + """ + The DHParameters object associated with this public key. + """ + + @abc.abstractmethod + def public_numbers(self): + """ + Returns a DHPublicNumbers. + """ + + @abc.abstractmethod + def public_bytes(self, encoding, format): + """ + Returns the key serialized as bytes. + """ + + +DHPublicKeyWithSerialization = DHPublicKey diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/dsa.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/dsa.py new file mode 100644 index 0000000..e380a44 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/dsa.py @@ -0,0 +1,254 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils + + +@six.add_metaclass(abc.ABCMeta) +class DSAParameters(object): + @abc.abstractmethod + def generate_private_key(self): + """ + Generates and returns a DSAPrivateKey. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DSAParametersWithNumbers(DSAParameters): + @abc.abstractmethod + def parameter_numbers(self): + """ + Returns a DSAParameterNumbers. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DSAPrivateKey(object): + @abc.abstractproperty + def key_size(self): + """ + The bit length of the prime modulus. + """ + + @abc.abstractmethod + def public_key(self): + """ + The DSAPublicKey associated with this private key. + """ + + @abc.abstractmethod + def parameters(self): + """ + The DSAParameters object associated with this private key. + """ + + @abc.abstractmethod + def signer(self, signature_algorithm): + """ + Returns an AsymmetricSignatureContext used for signing data. + """ + + @abc.abstractmethod + def sign(self, data, algorithm): + """ + Signs the data + """ + + +@six.add_metaclass(abc.ABCMeta) +class DSAPrivateKeyWithSerialization(DSAPrivateKey): + @abc.abstractmethod + def private_numbers(self): + """ + Returns a DSAPrivateNumbers. 
+ """ + + @abc.abstractmethod + def private_bytes(self, encoding, format, encryption_algorithm): + """ + Returns the key serialized as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class DSAPublicKey(object): + @abc.abstractproperty + def key_size(self): + """ + The bit length of the prime modulus. + """ + + @abc.abstractmethod + def parameters(self): + """ + The DSAParameters object associated with this public key. + """ + + @abc.abstractmethod + def verifier(self, signature, signature_algorithm): + """ + Returns an AsymmetricVerificationContext used for signing data. + """ + + @abc.abstractmethod + def public_numbers(self): + """ + Returns a DSAPublicNumbers. + """ + + @abc.abstractmethod + def public_bytes(self, encoding, format): + """ + Returns the key serialized as bytes. + """ + + @abc.abstractmethod + def verify(self, signature, data, algorithm): + """ + Verifies the signature of the data. + """ + + +DSAPublicKeyWithSerialization = DSAPublicKey + + +def generate_parameters(key_size, backend): + return backend.generate_dsa_parameters(key_size) + + +def generate_private_key(key_size, backend): + return backend.generate_dsa_private_key_and_parameters(key_size) + + +def _check_dsa_parameters(parameters): + if parameters.p.bit_length() not in [1024, 2048, 3072]: + raise ValueError("p must be exactly 1024, 2048, or 3072 bits long") + if parameters.q.bit_length() not in [160, 224, 256]: + raise ValueError("q must be exactly 160, 224, or 256 bits long") + + if not (1 < parameters.g < parameters.p): + raise ValueError("g, p don't satisfy 1 < g < p.") + + +def _check_dsa_private_numbers(numbers): + parameters = numbers.public_numbers.parameter_numbers + _check_dsa_parameters(parameters) + if numbers.x <= 0 or numbers.x >= parameters.q: + raise ValueError("x must be > 0 and < q.") + + if numbers.public_numbers.y != pow(parameters.g, numbers.x, parameters.p): + raise ValueError("y must be equal to (g ** x % p).") + + +class DSAParameterNumbers(object): + def __init__(self, p, q, g): + if ( + not isinstance(p, six.integer_types) or + not isinstance(q, six.integer_types) or + not isinstance(g, six.integer_types) + ): + raise TypeError( + "DSAParameterNumbers p, q, and g arguments must be integers." + ) + + self._p = p + self._q = q + self._g = g + + p = utils.read_only_property("_p") + q = utils.read_only_property("_q") + g = utils.read_only_property("_g") + + def parameters(self, backend): + return backend.load_dsa_parameter_numbers(self) + + def __eq__(self, other): + if not isinstance(other, DSAParameterNumbers): + return NotImplemented + + return self.p == other.p and self.q == other.q and self.g == other.g + + def __ne__(self, other): + return not self == other + + def __repr__(self): + return ( + "".format( + self=self + ) + ) + + +class DSAPublicNumbers(object): + def __init__(self, y, parameter_numbers): + if not isinstance(y, six.integer_types): + raise TypeError("DSAPublicNumbers y argument must be an integer.") + + if not isinstance(parameter_numbers, DSAParameterNumbers): + raise TypeError( + "parameter_numbers must be a DSAParameterNumbers instance." 
+
+
+class DSAPublicNumbers(object):
+    def __init__(self, y, parameter_numbers):
+        if not isinstance(y, six.integer_types):
+            raise TypeError("DSAPublicNumbers y argument must be an integer.")
+
+        if not isinstance(parameter_numbers, DSAParameterNumbers):
+            raise TypeError(
+                "parameter_numbers must be a DSAParameterNumbers instance."
+            )
+
+        self._y = y
+        self._parameter_numbers = parameter_numbers
+
+    y = utils.read_only_property("_y")
+    parameter_numbers = utils.read_only_property("_parameter_numbers")
+
+    def public_key(self, backend):
+        return backend.load_dsa_public_numbers(self)
+
+    def __eq__(self, other):
+        if not isinstance(other, DSAPublicNumbers):
+            return NotImplemented
+
+        return (
+            self.y == other.y and
+            self.parameter_numbers == other.parameter_numbers
+        )
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __repr__(self):
+        return (
+            "<DSAPublicNumbers(y={self.y}, "
+            "parameter_numbers={self.parameter_numbers})>".format(self=self)
+        )
+
+
+class DSAPrivateNumbers(object):
+    def __init__(self, x, public_numbers):
+        if not isinstance(x, six.integer_types):
+            raise TypeError("DSAPrivateNumbers x argument must be an integer.")
+
+        if not isinstance(public_numbers, DSAPublicNumbers):
+            raise TypeError(
+                "public_numbers must be a DSAPublicNumbers instance."
+            )
+        self._public_numbers = public_numbers
+        self._x = x
+
+    x = utils.read_only_property("_x")
+    public_numbers = utils.read_only_property("_public_numbers")
+
+    def private_key(self, backend):
+        return backend.load_dsa_private_numbers(self)
+
+    def __eq__(self, other):
+        if not isinstance(other, DSAPrivateNumbers):
+            return NotImplemented
+
+        return (
+            self.x == other.x and self.public_numbers == other.public_numbers
+        )
+
+    def __ne__(self, other):
+        return not self == other
diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/ec.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/ec.py
new file mode 100644
index 0000000..6cbfcab
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/ec.py
@@ -0,0 +1,411 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import abc
+
+import six
+
+from cryptography import utils
+
+
+@six.add_metaclass(abc.ABCMeta)
+class EllipticCurve(object):
+    @abc.abstractproperty
+    def name(self):
+        """
+        The name of the curve. e.g. secp256r1.
+        """
+
+    @abc.abstractproperty
+    def key_size(self):
+        """
+        Bit size of a secret scalar for the curve.
+        """
+
+
+@six.add_metaclass(abc.ABCMeta)
+class EllipticCurveSignatureAlgorithm(object):
+    @abc.abstractproperty
+    def algorithm(self):
+        """
+        The digest algorithm used with this signature.
+        """
+
+
+@six.add_metaclass(abc.ABCMeta)
+class EllipticCurvePrivateKey(object):
+    @abc.abstractmethod
+    def signer(self, signature_algorithm):
+        """
+        Returns an AsymmetricSignatureContext used for signing data.
+        """
+
+    @abc.abstractmethod
+    def exchange(self, algorithm, peer_public_key):
+        """
+        Performs a key exchange operation using the provided algorithm with
+        the provided peer's public key.
+        """
+
+    @abc.abstractmethod
+    def public_key(self):
+        """
+        The EllipticCurvePublicKey for this private key.
+        """
+
+    @abc.abstractproperty
+    def curve(self):
+        """
+        The EllipticCurve that this key is on.
+        """
+
+    @abc.abstractproperty
+    def key_size(self):
+        """
+        Bit size of a secret scalar for the curve.
+        """
+
+    @abc.abstractmethod
+    def sign(self, data, signature_algorithm):
+        """
+        Signs the data
+        """
+
+
+@six.add_metaclass(abc.ABCMeta)
+class EllipticCurvePrivateKeyWithSerialization(EllipticCurvePrivateKey):
+    @abc.abstractmethod
+    def private_numbers(self):
+        """
+        Returns an EllipticCurvePrivateNumbers.
+        """
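# Illustrative sketch, not part of the vendored diff: ECDSA sign/verify
# through the interfaces above. ec.generate_private_key, ECDSA, and
# SECP256R1 are all defined further down in this same ec.py diff.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec

private_key = ec.generate_private_key(ec.SECP256R1(), default_backend())
signature = private_key.sign(b"message", ec.ECDSA(hashes.SHA256()))
# verify() raises InvalidSignature if the signature does not match.
private_key.public_key().verify(
    signature, b"message", ec.ECDSA(hashes.SHA256())
)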
+ """ + + @abc.abstractmethod + def private_bytes(self, encoding, format, encryption_algorithm): + """ + Returns the key serialized as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class EllipticCurvePublicKey(object): + @abc.abstractmethod + def verifier(self, signature, signature_algorithm): + """ + Returns an AsymmetricVerificationContext used for signing data. + """ + + @abc.abstractproperty + def curve(self): + """ + The EllipticCurve that this key is on. + """ + + @abc.abstractproperty + def key_size(self): + """ + Bit size of a secret scalar for the curve. + """ + + @abc.abstractmethod + def public_numbers(self): + """ + Returns an EllipticCurvePublicNumbers. + """ + + @abc.abstractmethod + def public_bytes(self, encoding, format): + """ + Returns the key serialized as bytes. + """ + + @abc.abstractmethod + def verify(self, signature, data, signature_algorithm): + """ + Verifies the signature of the data. + """ + + +EllipticCurvePublicKeyWithSerialization = EllipticCurvePublicKey + + +@utils.register_interface(EllipticCurve) +class SECT571R1(object): + name = "sect571r1" + key_size = 570 + + +@utils.register_interface(EllipticCurve) +class SECT409R1(object): + name = "sect409r1" + key_size = 409 + + +@utils.register_interface(EllipticCurve) +class SECT283R1(object): + name = "sect283r1" + key_size = 283 + + +@utils.register_interface(EllipticCurve) +class SECT233R1(object): + name = "sect233r1" + key_size = 233 + + +@utils.register_interface(EllipticCurve) +class SECT163R2(object): + name = "sect163r2" + key_size = 163 + + +@utils.register_interface(EllipticCurve) +class SECT571K1(object): + name = "sect571k1" + key_size = 571 + + +@utils.register_interface(EllipticCurve) +class SECT409K1(object): + name = "sect409k1" + key_size = 409 + + +@utils.register_interface(EllipticCurve) +class SECT283K1(object): + name = "sect283k1" + key_size = 283 + + +@utils.register_interface(EllipticCurve) +class SECT233K1(object): + name = "sect233k1" + key_size = 233 + + +@utils.register_interface(EllipticCurve) +class SECT163K1(object): + name = "sect163k1" + key_size = 163 + + +@utils.register_interface(EllipticCurve) +class SECP521R1(object): + name = "secp521r1" + key_size = 521 + + +@utils.register_interface(EllipticCurve) +class SECP384R1(object): + name = "secp384r1" + key_size = 384 + + +@utils.register_interface(EllipticCurve) +class SECP256R1(object): + name = "secp256r1" + key_size = 256 + + +@utils.register_interface(EllipticCurve) +class SECP256K1(object): + name = "secp256k1" + key_size = 256 + + +@utils.register_interface(EllipticCurve) +class SECP224R1(object): + name = "secp224r1" + key_size = 224 + + +@utils.register_interface(EllipticCurve) +class SECP192R1(object): + name = "secp192r1" + key_size = 192 + + +@utils.register_interface(EllipticCurve) +class BrainpoolP256R1(object): + name = "brainpoolP256r1" + key_size = 256 + + +@utils.register_interface(EllipticCurve) +class BrainpoolP384R1(object): + name = "brainpoolP384r1" + key_size = 384 + + +@utils.register_interface(EllipticCurve) +class BrainpoolP512R1(object): + name = "brainpoolP512r1" + key_size = 512 + + +_CURVE_TYPES = { + "prime192v1": SECP192R1, + "prime256v1": SECP256R1, + + "secp192r1": SECP192R1, + "secp224r1": SECP224R1, + "secp256r1": SECP256R1, + "secp384r1": SECP384R1, + "secp521r1": SECP521R1, + "secp256k1": SECP256K1, + + "sect163k1": SECT163K1, + "sect233k1": SECT233K1, + "sect283k1": SECT283K1, + "sect409k1": SECT409K1, + "sect571k1": SECT571K1, + + "sect163r2": SECT163R2, + "sect233r1": SECT233R1, + 
"sect283r1": SECT283R1, + "sect409r1": SECT409R1, + "sect571r1": SECT571R1, + + "brainpoolP256r1": BrainpoolP256R1, + "brainpoolP384r1": BrainpoolP384R1, + "brainpoolP512r1": BrainpoolP512R1, +} + + +@utils.register_interface(EllipticCurveSignatureAlgorithm) +class ECDSA(object): + def __init__(self, algorithm): + self._algorithm = algorithm + + algorithm = utils.read_only_property("_algorithm") + + +def generate_private_key(curve, backend): + return backend.generate_elliptic_curve_private_key(curve) + + +def derive_private_key(private_value, curve, backend): + if not isinstance(private_value, six.integer_types): + raise TypeError("private_value must be an integer type.") + + if private_value <= 0: + raise ValueError("private_value must be a positive integer.") + + if not isinstance(curve, EllipticCurve): + raise TypeError("curve must provide the EllipticCurve interface.") + + return backend.derive_elliptic_curve_private_key(private_value, curve) + + +class EllipticCurvePublicNumbers(object): + def __init__(self, x, y, curve): + if ( + not isinstance(x, six.integer_types) or + not isinstance(y, six.integer_types) + ): + raise TypeError("x and y must be integers.") + + if not isinstance(curve, EllipticCurve): + raise TypeError("curve must provide the EllipticCurve interface.") + + self._y = y + self._x = x + self._curve = curve + + def public_key(self, backend): + return backend.load_elliptic_curve_public_numbers(self) + + def encode_point(self): + # key_size is in bits. Convert to bytes and round up + byte_length = (self.curve.key_size + 7) // 8 + return ( + b'\x04' + utils.int_to_bytes(self.x, byte_length) + + utils.int_to_bytes(self.y, byte_length) + ) + + @classmethod + def from_encoded_point(cls, curve, data): + if not isinstance(curve, EllipticCurve): + raise TypeError("curve must be an EllipticCurve instance") + + if data.startswith(b'\x04'): + # key_size is in bits. Convert to bytes and round up + byte_length = (curve.key_size + 7) // 8 + if len(data) == 2 * byte_length + 1: + x = utils.int_from_bytes(data[1:byte_length + 1], 'big') + y = utils.int_from_bytes(data[byte_length + 1:], 'big') + return cls(x, y, curve) + else: + raise ValueError('Invalid elliptic curve point data length') + else: + raise ValueError('Unsupported elliptic curve point type') + + curve = utils.read_only_property("_curve") + x = utils.read_only_property("_x") + y = utils.read_only_property("_y") + + def __eq__(self, other): + if not isinstance(other, EllipticCurvePublicNumbers): + return NotImplemented + + return ( + self.x == other.x and + self.y == other.y and + self.curve.name == other.curve.name and + self.curve.key_size == other.curve.key_size + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.x, self.y, self.curve.name, self.curve.key_size)) + + def __repr__(self): + return ( + "".format(self) + ) + + +class EllipticCurvePrivateNumbers(object): + def __init__(self, private_value, public_numbers): + if not isinstance(private_value, six.integer_types): + raise TypeError("private_value must be an integer.") + + if not isinstance(public_numbers, EllipticCurvePublicNumbers): + raise TypeError( + "public_numbers must be an EllipticCurvePublicNumbers " + "instance." 
+ ) + + self._private_value = private_value + self._public_numbers = public_numbers + + def private_key(self, backend): + return backend.load_elliptic_curve_private_numbers(self) + + private_value = utils.read_only_property("_private_value") + public_numbers = utils.read_only_property("_public_numbers") + + def __eq__(self, other): + if not isinstance(other, EllipticCurvePrivateNumbers): + return NotImplemented + + return ( + self.private_value == other.private_value and + self.public_numbers == other.public_numbers + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.private_value, self.public_numbers)) + + +class ECDH(object): + pass diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/padding.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/padding.py new file mode 100644 index 0000000..a37c3f9 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/padding.py @@ -0,0 +1,79 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +import math + +import six + +from cryptography import utils +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.asymmetric import rsa + + +@six.add_metaclass(abc.ABCMeta) +class AsymmetricPadding(object): + @abc.abstractproperty + def name(self): + """ + A string naming this padding (e.g. "PSS", "PKCS1"). + """ + + +@utils.register_interface(AsymmetricPadding) +class PKCS1v15(object): + name = "EMSA-PKCS1-v1_5" + + +@utils.register_interface(AsymmetricPadding) +class PSS(object): + MAX_LENGTH = object() + name = "EMSA-PSS" + + def __init__(self, mgf, salt_length): + self._mgf = mgf + + if (not isinstance(salt_length, six.integer_types) and + salt_length is not self.MAX_LENGTH): + raise TypeError("salt_length must be an integer.") + + if salt_length is not self.MAX_LENGTH and salt_length < 0: + raise ValueError("salt_length must be zero or greater.") + + self._salt_length = salt_length + + +@utils.register_interface(AsymmetricPadding) +class OAEP(object): + name = "EME-OAEP" + + def __init__(self, mgf, algorithm, label): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + + self._mgf = mgf + self._algorithm = algorithm + self._label = label + + +class MGF1(object): + MAX_LENGTH = object() + + def __init__(self, algorithm): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + + self._algorithm = algorithm + + +def calculate_max_pss_salt_length(key, hash_algorithm): + if not isinstance(key, (rsa.RSAPrivateKey, rsa.RSAPublicKey)): + raise TypeError("key must be an RSA public or private key") + # bit length - 1 per RFC 3447 + emlen = int(math.ceil((key.key_size - 1) / 8.0)) + salt_length = emlen - hash_algorithm.digest_size - 2 + assert salt_length >= 0 + return salt_length diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/rsa.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/rsa.py new file mode 100644 index 0000000..27db671 --- /dev/null +++ 
b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/rsa.py @@ -0,0 +1,368 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +try: + # Only available in math in 3.5+ + from math import gcd +except ImportError: + from fractions import gcd + +import six + +from cryptography import utils +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons +from cryptography.hazmat.backends.interfaces import RSABackend + + +@six.add_metaclass(abc.ABCMeta) +class RSAPrivateKey(object): + @abc.abstractmethod + def signer(self, padding, algorithm): + """ + Returns an AsymmetricSignatureContext used for signing data. + """ + + @abc.abstractmethod + def decrypt(self, ciphertext, padding): + """ + Decrypts the provided ciphertext. + """ + + @abc.abstractproperty + def key_size(self): + """ + The bit length of the public modulus. + """ + + @abc.abstractmethod + def public_key(self): + """ + The RSAPublicKey associated with this private key. + """ + + @abc.abstractmethod + def sign(self, data, padding, algorithm): + """ + Signs the data. + """ + + +@six.add_metaclass(abc.ABCMeta) +class RSAPrivateKeyWithSerialization(RSAPrivateKey): + @abc.abstractmethod + def private_numbers(self): + """ + Returns an RSAPrivateNumbers. + """ + + @abc.abstractmethod + def private_bytes(self, encoding, format, encryption_algorithm): + """ + Returns the key serialized as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class RSAPublicKey(object): + @abc.abstractmethod + def verifier(self, signature, padding, algorithm): + """ + Returns an AsymmetricVerificationContext used for verifying signatures. + """ + + @abc.abstractmethod + def encrypt(self, plaintext, padding): + """ + Encrypts the given plaintext. + """ + + @abc.abstractproperty + def key_size(self): + """ + The bit length of the public modulus. + """ + + @abc.abstractmethod + def public_numbers(self): + """ + Returns an RSAPublicNumbers + """ + + @abc.abstractmethod + def public_bytes(self, encoding, format): + """ + Returns the key serialized as bytes. + """ + + @abc.abstractmethod + def verify(self, signature, data, padding, algorithm): + """ + Verifies the signature of the data. 
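# Illustrative sketch, not part of the vendored diff: RSA-OAEP encryption
# through the RSAPrivateKey/RSAPublicKey interfaces above;
# generate_private_key is defined below in this rsa.py, and the padding
# classes come from the sibling padding.py diff.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

private_key = rsa.generate_private_key(
    public_exponent=65537, key_size=2048, backend=default_backend()
)
oaep = padding.OAEP(
    mgf=padding.MGF1(algorithm=hashes.SHA256()),
    algorithm=hashes.SHA256(),
    label=None,
)
ciphertext = private_key.public_key().encrypt(b"secret", oaep)
assert private_key.decrypt(ciphertext, oaep) == b"secret"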
+ """ + + +RSAPublicKeyWithSerialization = RSAPublicKey + + +def generate_private_key(public_exponent, key_size, backend): + if not isinstance(backend, RSABackend): + raise UnsupportedAlgorithm( + "Backend object does not implement RSABackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + _verify_rsa_parameters(public_exponent, key_size) + return backend.generate_rsa_private_key(public_exponent, key_size) + + +def _verify_rsa_parameters(public_exponent, key_size): + if public_exponent < 3: + raise ValueError("public_exponent must be >= 3.") + + if public_exponent & 1 == 0: + raise ValueError("public_exponent must be odd.") + + if key_size < 512: + raise ValueError("key_size must be at least 512-bits.") + + +def _check_private_key_components(p, q, private_exponent, dmp1, dmq1, iqmp, + public_exponent, modulus): + if modulus < 3: + raise ValueError("modulus must be >= 3.") + + if p >= modulus: + raise ValueError("p must be < modulus.") + + if q >= modulus: + raise ValueError("q must be < modulus.") + + if dmp1 >= modulus: + raise ValueError("dmp1 must be < modulus.") + + if dmq1 >= modulus: + raise ValueError("dmq1 must be < modulus.") + + if iqmp >= modulus: + raise ValueError("iqmp must be < modulus.") + + if private_exponent >= modulus: + raise ValueError("private_exponent must be < modulus.") + + if public_exponent < 3 or public_exponent >= modulus: + raise ValueError("public_exponent must be >= 3 and < modulus.") + + if public_exponent & 1 == 0: + raise ValueError("public_exponent must be odd.") + + if dmp1 & 1 == 0: + raise ValueError("dmp1 must be odd.") + + if dmq1 & 1 == 0: + raise ValueError("dmq1 must be odd.") + + if p * q != modulus: + raise ValueError("p*q must equal modulus.") + + +def _check_public_key_components(e, n): + if n < 3: + raise ValueError("n must be >= 3.") + + if e < 3 or e >= n: + raise ValueError("e must be >= 3 and < n.") + + if e & 1 == 0: + raise ValueError("e must be odd.") + + +def _modinv(e, m): + """ + Modular Multiplicative Inverse. Returns x such that: (x*e) mod m == 1 + """ + x1, y1, x2, y2 = 1, 0, 0, 1 + a, b = e, m + while b > 0: + q, r = divmod(a, b) + xn, yn = x1 - q * x2, y1 - q * y2 + a, b, x1, y1, x2, y2 = b, r, x2, y2, xn, yn + return x1 % m + + +def rsa_crt_iqmp(p, q): + """ + Compute the CRT (q ** -1) % p value from RSA primes p and q. + """ + return _modinv(q, p) + + +def rsa_crt_dmp1(private_exponent, p): + """ + Compute the CRT private_exponent % (p - 1) value from the RSA + private_exponent (d) and p. + """ + return private_exponent % (p - 1) + + +def rsa_crt_dmq1(private_exponent, q): + """ + Compute the CRT private_exponent % (q - 1) value from the RSA + private_exponent (d) and q. + """ + return private_exponent % (q - 1) + + +# Controls the number of iterations rsa_recover_prime_factors will perform +# to obtain the prime factors. Each iteration increments by 2 so the actual +# maximum attempts is half this number. +_MAX_RECOVERY_ATTEMPTS = 1000 + + +def rsa_recover_prime_factors(n, e, d): + """ + Compute factors p and q from the private exponent d. We assume that n has + no more than two factors. This function is adapted from code in PyCrypto. + """ + # See 8.2.2(i) in Handbook of Applied Cryptography. + ktot = d * e - 1 + # The quantity d*e-1 is a multiple of phi(n), even, + # and can be represented as t*2^s. + t = ktot + while t % 2 == 0: + t = t // 2 + # Cycle through all multiplicative inverses in Zn. + # The algorithm is non-deterministic, but there is a 50% chance + # any candidate a leads to successful factoring. 
+ # See "Digitalized Signatures and Public Key Functions as Intractable + # as Factorization", M. Rabin, 1979 + spotted = False + a = 2 + while not spotted and a < _MAX_RECOVERY_ATTEMPTS: + k = t + # Cycle through all values a^{t*2^i}=a^k + while k < ktot: + cand = pow(a, k, n) + # Check if a^k is a non-trivial root of unity (mod n) + if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1: + # We have found a number such that (cand-1)(cand+1)=0 (mod n). + # Either of the terms divides n. + p = gcd(cand + 1, n) + spotted = True + break + k *= 2 + # This value was not any good... let's try another! + a += 2 + if not spotted: + raise ValueError("Unable to compute factors p and q from exponent d.") + # Found ! + q, r = divmod(n, p) + assert r == 0 + p, q = sorted((p, q), reverse=True) + return (p, q) + + +class RSAPrivateNumbers(object): + def __init__(self, p, q, d, dmp1, dmq1, iqmp, + public_numbers): + if ( + not isinstance(p, six.integer_types) or + not isinstance(q, six.integer_types) or + not isinstance(d, six.integer_types) or + not isinstance(dmp1, six.integer_types) or + not isinstance(dmq1, six.integer_types) or + not isinstance(iqmp, six.integer_types) + ): + raise TypeError( + "RSAPrivateNumbers p, q, d, dmp1, dmq1, iqmp arguments must" + " all be an integers." + ) + + if not isinstance(public_numbers, RSAPublicNumbers): + raise TypeError( + "RSAPrivateNumbers public_numbers must be an RSAPublicNumbers" + " instance." + ) + + self._p = p + self._q = q + self._d = d + self._dmp1 = dmp1 + self._dmq1 = dmq1 + self._iqmp = iqmp + self._public_numbers = public_numbers + + p = utils.read_only_property("_p") + q = utils.read_only_property("_q") + d = utils.read_only_property("_d") + dmp1 = utils.read_only_property("_dmp1") + dmq1 = utils.read_only_property("_dmq1") + iqmp = utils.read_only_property("_iqmp") + public_numbers = utils.read_only_property("_public_numbers") + + def private_key(self, backend): + return backend.load_rsa_private_numbers(self) + + def __eq__(self, other): + if not isinstance(other, RSAPrivateNumbers): + return NotImplemented + + return ( + self.p == other.p and + self.q == other.q and + self.d == other.d and + self.dmp1 == other.dmp1 and + self.dmq1 == other.dmq1 and + self.iqmp == other.iqmp and + self.public_numbers == other.public_numbers + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(( + self.p, + self.q, + self.d, + self.dmp1, + self.dmq1, + self.iqmp, + self.public_numbers, + )) + + +class RSAPublicNumbers(object): + def __init__(self, e, n): + if ( + not isinstance(e, six.integer_types) or + not isinstance(n, six.integer_types) + ): + raise TypeError("RSAPublicNumbers arguments must be integers.") + + self._e = e + self._n = n + + e = utils.read_only_property("_e") + n = utils.read_only_property("_n") + + def public_key(self, backend): + return backend.load_rsa_public_numbers(self) + + def __repr__(self): + return "".format(self) + + def __eq__(self, other): + if not isinstance(other, RSAPublicNumbers): + return NotImplemented + + return self.e == other.e and self.n == other.n + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.e, self.n)) diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/utils.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/utils.py new file mode 100644 index 0000000..ef1e7eb --- /dev/null +++ 
b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/utils.py @@ -0,0 +1,60 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import warnings + +from asn1crypto.algos import DSASignature + +import six + +from cryptography import utils +from cryptography.hazmat.primitives import hashes + + +def decode_rfc6979_signature(signature): + warnings.warn( + "decode_rfc6979_signature is deprecated and will " + "be removed in a future version, use decode_dss_signature instead.", + utils.PersistentlyDeprecated, + stacklevel=2 + ) + return decode_dss_signature(signature) + + +def decode_dss_signature(signature): + data = DSASignature.load(signature, strict=True).native + return data['r'], data['s'] + + +def encode_rfc6979_signature(r, s): + warnings.warn( + "encode_rfc6979_signature is deprecated and will " + "be removed in a future version, use encode_dss_signature instead.", + utils.PersistentlyDeprecated, + stacklevel=2 + ) + return encode_dss_signature(r, s) + + +def encode_dss_signature(r, s): + if ( + not isinstance(r, six.integer_types) or + not isinstance(s, six.integer_types) + ): + raise ValueError("Both r and s must be integers") + + return DSASignature({'r': r, 's': s}).dump() + + +class Prehashed(object): + def __init__(self, algorithm): + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of HashAlgorithm.") + + self._algorithm = algorithm + self._digest_size = algorithm.digest_size + + digest_size = utils.read_only_property("_digest_size") diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/x25519.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/x25519.py new file mode 100644 index 0000000..5c4652a --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/asymmetric/x25519.py @@ -0,0 +1,54 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
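# Illustrative sketch, not part of the vendored diff: the asn1crypto-backed
# helpers above serialize an (r, s) signature pair to DER and back.
from cryptography.hazmat.primitives.asymmetric.utils import (
    decode_dss_signature, encode_dss_signature
)

der_sig = encode_dss_signature(r=12345, s=67890)
assert decode_dss_signature(der_sig) == (12345, 67890)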
+ +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography.exceptions import UnsupportedAlgorithm, _Reasons + + +@six.add_metaclass(abc.ABCMeta) +class X25519PublicKey(object): + @classmethod + def from_public_bytes(cls, data): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.x25519_supported(): + raise UnsupportedAlgorithm( + "X25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM + ) + return backend.x25519_load_public_bytes(data) + + @abc.abstractmethod + def public_bytes(self): + pass + + +@six.add_metaclass(abc.ABCMeta) +class X25519PrivateKey(object): + @classmethod + def generate(cls): + from cryptography.hazmat.backends.openssl.backend import backend + if not backend.x25519_supported(): + raise UnsupportedAlgorithm( + "X25519 is not supported by this version of OpenSSL.", + _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM + ) + return backend.x25519_generate_key() + + @classmethod + def _from_private_bytes(cls, data): + from cryptography.hazmat.backends.openssl.backend import backend + return backend.x25519_load_private_bytes(data) + + @abc.abstractmethod + def public_key(self): + pass + + @abc.abstractmethod + def exchange(self, peer_public_key): + pass diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/__init__.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/__init__.py new file mode 100644 index 0000000..171b1c6 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/__init__.py @@ -0,0 +1,21 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography.hazmat.primitives.ciphers.base import ( + AEADCipherContext, AEADDecryptionContext, AEADEncryptionContext, + BlockCipherAlgorithm, Cipher, CipherAlgorithm, CipherContext +) + + +__all__ = [ + "Cipher", + "CipherAlgorithm", + "BlockCipherAlgorithm", + "CipherContext", + "AEADCipherContext", + "AEADDecryptionContext", + "AEADEncryptionContext", +] diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/aead.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/aead.py new file mode 100644 index 0000000..e519765 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/aead.py @@ -0,0 +1,188 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
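# Illustrative sketch, not part of the vendored diff: X25519 key agreement
# with the classes above. As coded, generate() raises UnsupportedAlgorithm
# when the linked OpenSSL lacks X25519 support.
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey

ours = X25519PrivateKey.generate()
theirs = X25519PrivateKey.generate()
assert ours.exchange(theirs.public_key()) == theirs.exchange(ours.public_key())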
+ +from __future__ import absolute_import, division, print_function + +import os + +from cryptography import exceptions, utils +from cryptography.hazmat.backends.openssl import aead +from cryptography.hazmat.backends.openssl.backend import backend + + +class ChaCha20Poly1305(object): + _MAX_SIZE = 2 ** 32 + + def __init__(self, key): + if not backend.aead_cipher_supported(self): + raise exceptions.UnsupportedAlgorithm( + "ChaCha20Poly1305 is not supported by this version of OpenSSL", + exceptions._Reasons.UNSUPPORTED_CIPHER + ) + utils._check_bytes("key", key) + + if len(key) != 32: + raise ValueError("ChaCha20Poly1305 key must be 32 bytes.") + + self._key = key + + @classmethod + def generate_key(cls): + return os.urandom(32) + + def encrypt(self, nonce, data, associated_data): + if associated_data is None: + associated_data = b"" + + if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE: + # This is OverflowError to match what cffi would raise + raise OverflowError( + "Data or associated data too long. Max 2**32 bytes" + ) + + self._check_params(nonce, data, associated_data) + return aead._encrypt( + backend, self, nonce, data, associated_data, 16 + ) + + def decrypt(self, nonce, data, associated_data): + if associated_data is None: + associated_data = b"" + + self._check_params(nonce, data, associated_data) + return aead._decrypt( + backend, self, nonce, data, associated_data, 16 + ) + + def _check_params(self, nonce, data, associated_data): + utils._check_bytes("nonce", nonce) + utils._check_bytes("data", data) + utils._check_bytes("associated_data", associated_data) + if len(nonce) != 12: + raise ValueError("Nonce must be 12 bytes") + + +class AESCCM(object): + _MAX_SIZE = 2 ** 32 + + def __init__(self, key, tag_length=16): + utils._check_bytes("key", key) + if len(key) not in (16, 24, 32): + raise ValueError("AESCCM key must be 128, 192, or 256 bits.") + + self._key = key + if not isinstance(tag_length, int): + raise TypeError("tag_length must be an integer") + + if tag_length not in (4, 6, 8, 12, 14, 16): + raise ValueError("Invalid tag_length") + + self._tag_length = tag_length + + if not backend.aead_cipher_supported(self): + raise exceptions.UnsupportedAlgorithm( + "AESCCM is not supported by this version of OpenSSL", + exceptions._Reasons.UNSUPPORTED_CIPHER + ) + + @classmethod + def generate_key(cls, bit_length): + if not isinstance(bit_length, int): + raise TypeError("bit_length must be an integer") + + if bit_length not in (128, 192, 256): + raise ValueError("bit_length must be 128, 192, or 256") + + return os.urandom(bit_length // 8) + + def encrypt(self, nonce, data, associated_data): + if associated_data is None: + associated_data = b"" + + if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE: + # This is OverflowError to match what cffi would raise + raise OverflowError( + "Data or associated data too long. 
Max 2**32 bytes" + ) + + self._check_params(nonce, data, associated_data) + self._validate_lengths(nonce, len(data)) + return aead._encrypt( + backend, self, nonce, data, associated_data, self._tag_length + ) + + def decrypt(self, nonce, data, associated_data): + if associated_data is None: + associated_data = b"" + + self._check_params(nonce, data, associated_data) + return aead._decrypt( + backend, self, nonce, data, associated_data, self._tag_length + ) + + def _validate_lengths(self, nonce, data_len): + # For information about computing this, see + # https://tools.ietf.org/html/rfc3610#section-2.1 + l_val = 15 - len(nonce) + if 2 ** (8 * l_val) < data_len: + raise ValueError("Nonce too long for data") + + def _check_params(self, nonce, data, associated_data): + utils._check_bytes("nonce", nonce) + utils._check_bytes("data", data) + utils._check_bytes("associated_data", associated_data) + if not 7 <= len(nonce) <= 13: + raise ValueError("Nonce must be between 7 and 13 bytes") + + +class AESGCM(object): + _MAX_SIZE = 2 ** 32 + + def __init__(self, key): + utils._check_bytes("key", key) + if len(key) not in (16, 24, 32): + raise ValueError("AESGCM key must be 128, 192, or 256 bits.") + + self._key = key + + @classmethod + def generate_key(cls, bit_length): + if not isinstance(bit_length, int): + raise TypeError("bit_length must be an integer") + + if bit_length not in (128, 192, 256): + raise ValueError("bit_length must be 128, 192, or 256") + + return os.urandom(bit_length // 8) + + def encrypt(self, nonce, data, associated_data): + if associated_data is None: + associated_data = b"" + + if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE: + # This is OverflowError to match what cffi would raise + raise OverflowError( + "Data or associated data too long. Max 2**32 bytes" + ) + + self._check_params(nonce, data, associated_data) + return aead._encrypt( + backend, self, nonce, data, associated_data, 16 + ) + + def decrypt(self, nonce, data, associated_data): + if associated_data is None: + associated_data = b"" + + self._check_params(nonce, data, associated_data) + return aead._decrypt( + backend, self, nonce, data, associated_data, 16 + ) + + def _check_params(self, nonce, data, associated_data): + utils._check_bytes("nonce", nonce) + utils._check_bytes("data", data) + utils._check_bytes("associated_data", associated_data) + if len(nonce) == 0: + raise ValueError("Nonce must be at least 1 byte") diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/algorithms.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/algorithms.py new file mode 100644 index 0000000..68a9e33 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/algorithms.py @@ -0,0 +1,168 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
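# Illustrative sketch, not part of the vendored diff: the one-shot AESGCM
# class from aead.py above. The associated data is authenticated but not
# encrypted, and decrypt() raises InvalidTag if either input was tampered
# with.
import os

from cryptography.hazmat.primitives.ciphers.aead import AESGCM

key = AESGCM.generate_key(bit_length=128)
nonce = os.urandom(12)  # 96-bit nonces are the conventional choice for GCM
ciphertext = AESGCM(key).encrypt(nonce, b"secret", b"header")
assert AESGCM(key).decrypt(nonce, ciphertext, b"header") == b"secret"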
+ +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.hazmat.primitives.ciphers import ( + BlockCipherAlgorithm, CipherAlgorithm +) +from cryptography.hazmat.primitives.ciphers.modes import ModeWithNonce + + +def _verify_key_size(algorithm, key): + # Verify that the key is instance of bytes + utils._check_bytes("key", key) + + # Verify that the key size matches the expected key size + if len(key) * 8 not in algorithm.key_sizes: + raise ValueError("Invalid key size ({0}) for {1}.".format( + len(key) * 8, algorithm.name + )) + return key + + +@utils.register_interface(BlockCipherAlgorithm) +@utils.register_interface(CipherAlgorithm) +class AES(object): + name = "AES" + block_size = 128 + # 512 added to support AES-256-XTS, which uses 512-bit keys + key_sizes = frozenset([128, 192, 256, 512]) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(BlockCipherAlgorithm) +@utils.register_interface(CipherAlgorithm) +class Camellia(object): + name = "camellia" + block_size = 128 + key_sizes = frozenset([128, 192, 256]) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(BlockCipherAlgorithm) +@utils.register_interface(CipherAlgorithm) +class TripleDES(object): + name = "3DES" + block_size = 64 + key_sizes = frozenset([64, 128, 192]) + + def __init__(self, key): + if len(key) == 8: + key += key + key + elif len(key) == 16: + key += key[:8] + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(BlockCipherAlgorithm) +@utils.register_interface(CipherAlgorithm) +class Blowfish(object): + name = "Blowfish" + block_size = 64 + key_sizes = frozenset(range(32, 449, 8)) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(BlockCipherAlgorithm) +@utils.register_interface(CipherAlgorithm) +class CAST5(object): + name = "CAST5" + block_size = 64 + key_sizes = frozenset(range(40, 129, 8)) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(CipherAlgorithm) +class ARC4(object): + name = "RC4" + key_sizes = frozenset([40, 56, 64, 80, 128, 160, 192, 256]) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(CipherAlgorithm) +class IDEA(object): + name = "IDEA" + block_size = 64 + key_sizes = frozenset([128]) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(BlockCipherAlgorithm) +@utils.register_interface(CipherAlgorithm) +class SEED(object): + name = "SEED" + block_size = 128 + key_sizes = frozenset([128]) + + def __init__(self, key): + self.key = _verify_key_size(self, key) + + @property + def key_size(self): + return len(self.key) * 8 + + +@utils.register_interface(CipherAlgorithm) +@utils.register_interface(ModeWithNonce) +class ChaCha20(object): + name = "ChaCha20" + key_sizes = frozenset([256]) + + def __init__(self, key, nonce): + self.key = _verify_key_size(self, key) + if not isinstance(nonce, bytes): + 
raise TypeError("nonce must be bytes") + + if len(nonce) != 16: + raise ValueError("nonce must be 128-bits (16 bytes)") + + self._nonce = nonce + + nonce = utils.read_only_property("_nonce") + + @property + def key_size(self): + return len(self.key) * 8 diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/base.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/base.py new file mode 100644 index 0000000..f857041 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/base.py @@ -0,0 +1,235 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, AlreadyUpdated, NotYetFinalized, UnsupportedAlgorithm, + _Reasons +) +from cryptography.hazmat.backends.interfaces import CipherBackend +from cryptography.hazmat.primitives.ciphers import modes + + +@six.add_metaclass(abc.ABCMeta) +class CipherAlgorithm(object): + @abc.abstractproperty + def name(self): + """ + A string naming this mode (e.g. "AES", "Camellia"). + """ + + @abc.abstractproperty + def key_size(self): + """ + The size of the key being used as an integer in bits (e.g. 128, 256). + """ + + +@six.add_metaclass(abc.ABCMeta) +class BlockCipherAlgorithm(object): + @abc.abstractproperty + def block_size(self): + """ + The size of a block as an integer in bits (e.g. 64, 128). + """ + + +@six.add_metaclass(abc.ABCMeta) +class CipherContext(object): + @abc.abstractmethod + def update(self, data): + """ + Processes the provided bytes through the cipher and returns the results + as bytes. + """ + + @abc.abstractmethod + def update_into(self, data, buf): + """ + Processes the provided bytes and writes the resulting data into the + provided buffer. Returns the number of bytes written. + """ + + @abc.abstractmethod + def finalize(self): + """ + Returns the results of processing the final block as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class AEADCipherContext(object): + @abc.abstractmethod + def authenticate_additional_data(self, data): + """ + Authenticates the provided bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class AEADDecryptionContext(object): + @abc.abstractmethod + def finalize_with_tag(self, tag): + """ + Returns the results of processing the final block as bytes and allows + delayed passing of the authentication tag. + """ + + +@six.add_metaclass(abc.ABCMeta) +class AEADEncryptionContext(object): + @abc.abstractproperty + def tag(self): + """ + Returns tag bytes. This is only available after encryption is + finalized. 
+ """ + + +class Cipher(object): + def __init__(self, algorithm, mode, backend): + if not isinstance(backend, CipherBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement CipherBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + if not isinstance(algorithm, CipherAlgorithm): + raise TypeError("Expected interface of CipherAlgorithm.") + + if mode is not None: + mode.validate_for_algorithm(algorithm) + + self.algorithm = algorithm + self.mode = mode + self._backend = backend + + def encryptor(self): + if isinstance(self.mode, modes.ModeWithAuthenticationTag): + if self.mode.tag is not None: + raise ValueError( + "Authentication tag must be None when encrypting." + ) + ctx = self._backend.create_symmetric_encryption_ctx( + self.algorithm, self.mode + ) + return self._wrap_ctx(ctx, encrypt=True) + + def decryptor(self): + ctx = self._backend.create_symmetric_decryption_ctx( + self.algorithm, self.mode + ) + return self._wrap_ctx(ctx, encrypt=False) + + def _wrap_ctx(self, ctx, encrypt): + if isinstance(self.mode, modes.ModeWithAuthenticationTag): + if encrypt: + return _AEADEncryptionContext(ctx) + else: + return _AEADCipherContext(ctx) + else: + return _CipherContext(ctx) + + +@utils.register_interface(CipherContext) +class _CipherContext(object): + def __init__(self, ctx): + self._ctx = ctx + + def update(self, data): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return self._ctx.update(data) + + def update_into(self, data, buf): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return self._ctx.update_into(data, buf) + + def finalize(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + data = self._ctx.finalize() + self._ctx = None + return data + + +@utils.register_interface(AEADCipherContext) +@utils.register_interface(CipherContext) +@utils.register_interface(AEADDecryptionContext) +class _AEADCipherContext(object): + def __init__(self, ctx): + self._ctx = ctx + self._bytes_processed = 0 + self._aad_bytes_processed = 0 + self._tag = None + self._updated = False + + def _check_limit(self, data_size): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + self._updated = True + self._bytes_processed += data_size + if self._bytes_processed > self._ctx._mode._MAX_ENCRYPTED_BYTES: + raise ValueError( + "{0} has a maximum encrypted byte limit of {1}".format( + self._ctx._mode.name, self._ctx._mode._MAX_ENCRYPTED_BYTES + ) + ) + + def update(self, data): + self._check_limit(len(data)) + return self._ctx.update(data) + + def update_into(self, data, buf): + self._check_limit(len(data)) + return self._ctx.update_into(data, buf) + + def finalize(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + data = self._ctx.finalize() + self._tag = self._ctx.tag + self._ctx = None + return data + + def finalize_with_tag(self, tag): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + data = self._ctx.finalize_with_tag(tag) + self._tag = self._ctx.tag + self._ctx = None + return data + + def authenticate_additional_data(self, data): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + if self._updated: + raise AlreadyUpdated("Update has been called on this context.") + + self._aad_bytes_processed += len(data) + if self._aad_bytes_processed > self._ctx._mode._MAX_AAD_BYTES: + raise ValueError( + "{0} has a maximum AAD byte limit of 
{1}".format( + self._ctx._mode.name, self._ctx._mode._MAX_AAD_BYTES + ) + ) + + self._ctx.authenticate_additional_data(data) + + +@utils.register_interface(AEADEncryptionContext) +class _AEADEncryptionContext(_AEADCipherContext): + @property + def tag(self): + if self._ctx is not None: + raise NotYetFinalized("You must finalize encryption before " + "getting the tag.") + return self._tag diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/modes.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/modes.py new file mode 100644 index 0000000..e82c1a8 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/ciphers/modes.py @@ -0,0 +1,231 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils + + +@six.add_metaclass(abc.ABCMeta) +class Mode(object): + @abc.abstractproperty + def name(self): + """ + A string naming this mode (e.g. "ECB", "CBC"). + """ + + @abc.abstractmethod + def validate_for_algorithm(self, algorithm): + """ + Checks that all the necessary invariants of this (mode, algorithm) + combination are met. + """ + + +@six.add_metaclass(abc.ABCMeta) +class ModeWithInitializationVector(object): + @abc.abstractproperty + def initialization_vector(self): + """ + The value of the initialization vector for this mode as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class ModeWithTweak(object): + @abc.abstractproperty + def tweak(self): + """ + The value of the tweak for this mode as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class ModeWithNonce(object): + @abc.abstractproperty + def nonce(self): + """ + The value of the nonce for this mode as bytes. + """ + + +@six.add_metaclass(abc.ABCMeta) +class ModeWithAuthenticationTag(object): + @abc.abstractproperty + def tag(self): + """ + The value of the tag supplied to the constructor of this mode. 
+ """ + + +def _check_aes_key_length(self, algorithm): + if algorithm.key_size > 256 and algorithm.name == "AES": + raise ValueError( + "Only 128, 192, and 256 bit keys are allowed for this AES mode" + ) + + +def _check_iv_length(self, algorithm): + if len(self.initialization_vector) * 8 != algorithm.block_size: + raise ValueError("Invalid IV size ({0}) for {1}.".format( + len(self.initialization_vector), self.name + )) + + +def _check_iv_and_key_length(self, algorithm): + _check_aes_key_length(self, algorithm) + _check_iv_length(self, algorithm) + + +@utils.register_interface(Mode) +@utils.register_interface(ModeWithInitializationVector) +class CBC(object): + name = "CBC" + + def __init__(self, initialization_vector): + if not isinstance(initialization_vector, bytes): + raise TypeError("initialization_vector must be bytes") + + self._initialization_vector = initialization_vector + + initialization_vector = utils.read_only_property("_initialization_vector") + validate_for_algorithm = _check_iv_and_key_length + + +@utils.register_interface(Mode) +@utils.register_interface(ModeWithTweak) +class XTS(object): + name = "XTS" + + def __init__(self, tweak): + if not isinstance(tweak, bytes): + raise TypeError("tweak must be bytes") + + if len(tweak) != 16: + raise ValueError("tweak must be 128-bits (16 bytes)") + + self._tweak = tweak + + tweak = utils.read_only_property("_tweak") + + def validate_for_algorithm(self, algorithm): + if algorithm.key_size not in (256, 512): + raise ValueError( + "The XTS specification requires a 256-bit key for AES-128-XTS" + " and 512-bit key for AES-256-XTS" + ) + + +@utils.register_interface(Mode) +class ECB(object): + name = "ECB" + + validate_for_algorithm = _check_aes_key_length + + +@utils.register_interface(Mode) +@utils.register_interface(ModeWithInitializationVector) +class OFB(object): + name = "OFB" + + def __init__(self, initialization_vector): + if not isinstance(initialization_vector, bytes): + raise TypeError("initialization_vector must be bytes") + + self._initialization_vector = initialization_vector + + initialization_vector = utils.read_only_property("_initialization_vector") + validate_for_algorithm = _check_iv_and_key_length + + +@utils.register_interface(Mode) +@utils.register_interface(ModeWithInitializationVector) +class CFB(object): + name = "CFB" + + def __init__(self, initialization_vector): + if not isinstance(initialization_vector, bytes): + raise TypeError("initialization_vector must be bytes") + + self._initialization_vector = initialization_vector + + initialization_vector = utils.read_only_property("_initialization_vector") + validate_for_algorithm = _check_iv_and_key_length + + +@utils.register_interface(Mode) +@utils.register_interface(ModeWithInitializationVector) +class CFB8(object): + name = "CFB8" + + def __init__(self, initialization_vector): + if not isinstance(initialization_vector, bytes): + raise TypeError("initialization_vector must be bytes") + + self._initialization_vector = initialization_vector + + initialization_vector = utils.read_only_property("_initialization_vector") + validate_for_algorithm = _check_iv_and_key_length + + +@utils.register_interface(Mode) +@utils.register_interface(ModeWithNonce) +class CTR(object): + name = "CTR" + + def __init__(self, nonce): + if not isinstance(nonce, bytes): + raise TypeError("nonce must be bytes") + + self._nonce = nonce + + nonce = utils.read_only_property("_nonce") + + def validate_for_algorithm(self, algorithm): + _check_aes_key_length(self, algorithm) + if 
len(self.nonce) * 8 != algorithm.block_size: + raise ValueError("Invalid nonce size ({0}) for {1}.".format( + len(self.nonce), self.name + )) + + +@utils.register_interface(Mode) +@utils.register_interface(ModeWithInitializationVector) +@utils.register_interface(ModeWithAuthenticationTag) +class GCM(object): + name = "GCM" + _MAX_ENCRYPTED_BYTES = (2 ** 39 - 256) // 8 + _MAX_AAD_BYTES = (2 ** 64) // 8 + + def __init__(self, initialization_vector, tag=None, min_tag_length=16): + # len(initialization_vector) must be in [1, 2 ** 64), but it's + # impossible to actually construct a bytes object that large, so we + # don't check for it + if not isinstance(initialization_vector, bytes): + raise TypeError("initialization_vector must be bytes") + if len(initialization_vector) == 0: + raise ValueError("initialization_vector must be at least 1 byte") + self._initialization_vector = initialization_vector + if tag is not None: + if not isinstance(tag, bytes): + raise TypeError("tag must be bytes or None") + if min_tag_length < 4: + raise ValueError("min_tag_length must be >= 4") + if len(tag) < min_tag_length: + raise ValueError( + "Authentication tag must be {0} bytes or longer.".format( + min_tag_length) + ) + self._tag = tag + self._min_tag_length = min_tag_length + + tag = utils.read_only_property("_tag") + initialization_vector = utils.read_only_property("_initialization_vector") + + def validate_for_algorithm(self, algorithm): + _check_aes_key_length(self, algorithm) diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/cmac.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/cmac.py new file mode 100644 index 0000000..77537f0 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/cmac.py @@ -0,0 +1,66 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import CMACBackend +from cryptography.hazmat.primitives import ciphers, mac + + +@utils.register_interface(mac.MACContext) +class CMAC(object): + def __init__(self, algorithm, backend, ctx=None): + if not isinstance(backend, CMACBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement CMACBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + if not isinstance(algorithm, ciphers.BlockCipherAlgorithm): + raise TypeError( + "Expected instance of BlockCipherAlgorithm."
+ ) + self._algorithm = algorithm + + self._backend = backend + if ctx is None: + self._ctx = self._backend.create_cmac_ctx(self._algorithm) + else: + self._ctx = ctx + + def update(self, data): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + if not isinstance(data, bytes): + raise TypeError("data must be bytes.") + self._ctx.update(data) + + def finalize(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + digest = self._ctx.finalize() + self._ctx = None + return digest + + def verify(self, signature): + if not isinstance(signature, bytes): + raise TypeError("signature must be bytes.") + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + + ctx, self._ctx = self._ctx, None + ctx.verify(signature) + + def copy(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return CMAC( + self._algorithm, + backend=self._backend, + ctx=self._ctx.copy() + ) diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/constant_time.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/constant_time.py new file mode 100644 index 0000000..0e987ea --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/constant_time.py @@ -0,0 +1,35 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import hmac +import warnings + +from cryptography import utils +from cryptography.hazmat.bindings._constant_time import lib + + +if hasattr(hmac, "compare_digest"): + def bytes_eq(a, b): + if not isinstance(a, bytes) or not isinstance(b, bytes): + raise TypeError("a and b must be bytes.") + + return hmac.compare_digest(a, b) + +else: + warnings.warn( + "Support for your Python version is deprecated. The next version of " + "cryptography will remove support. Please upgrade to a 2.7.x " + "release that supports hmac.compare_digest as soon as possible.", + utils.DeprecatedIn23, + ) + + def bytes_eq(a, b): + if not isinstance(a, bytes) or not isinstance(b, bytes): + raise TypeError("a and b must be bytes.") + + return lib.Cryptography_constant_time_bytes_eq( + a, len(a), b, len(b) + ) == 1 diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/hashes.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/hashes.py new file mode 100644 index 0000000..3f3aadd --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/hashes.py @@ -0,0 +1,185 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import HashBackend + + +@six.add_metaclass(abc.ABCMeta) +class HashAlgorithm(object): + @abc.abstractproperty + def name(self): + """ + A string naming this algorithm (e.g. "sha256", "md5"). + """ + + @abc.abstractproperty + def digest_size(self): + """ + The size of the resulting digest in bytes. 
+ """ + + +@six.add_metaclass(abc.ABCMeta) +class HashContext(object): + @abc.abstractproperty + def algorithm(self): + """ + A HashAlgorithm that will be used by this context. + """ + + @abc.abstractmethod + def update(self, data): + """ + Processes the provided bytes through the hash. + """ + + @abc.abstractmethod + def finalize(self): + """ + Finalizes the hash context and returns the hash digest as bytes. + """ + + @abc.abstractmethod + def copy(self): + """ + Return a HashContext that is a copy of the current context. + """ + + +@utils.register_interface(HashContext) +class Hash(object): + def __init__(self, algorithm, backend, ctx=None): + if not isinstance(backend, HashBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement HashBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + if not isinstance(algorithm, HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + self._algorithm = algorithm + + self._backend = backend + + if ctx is None: + self._ctx = self._backend.create_hash_ctx(self.algorithm) + else: + self._ctx = ctx + + algorithm = utils.read_only_property("_algorithm") + + def update(self, data): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + if not isinstance(data, bytes): + raise TypeError("data must be bytes.") + self._ctx.update(data) + + def copy(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return Hash( + self.algorithm, backend=self._backend, ctx=self._ctx.copy() + ) + + def finalize(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + digest = self._ctx.finalize() + self._ctx = None + return digest + + +@utils.register_interface(HashAlgorithm) +class SHA1(object): + name = "sha1" + digest_size = 20 + block_size = 64 + + +@utils.register_interface(HashAlgorithm) +class SHA224(object): + name = "sha224" + digest_size = 28 + block_size = 64 + + +@utils.register_interface(HashAlgorithm) +class SHA256(object): + name = "sha256" + digest_size = 32 + block_size = 64 + + +@utils.register_interface(HashAlgorithm) +class SHA384(object): + name = "sha384" + digest_size = 48 + block_size = 128 + + +@utils.register_interface(HashAlgorithm) +class SHA512(object): + name = "sha512" + digest_size = 64 + block_size = 128 + + +@utils.register_interface(HashAlgorithm) +class MD5(object): + name = "md5" + digest_size = 16 + block_size = 64 + + +@utils.register_interface(HashAlgorithm) +class BLAKE2b(object): + name = "blake2b" + _max_digest_size = 64 + _min_digest_size = 1 + block_size = 128 + + def __init__(self, digest_size): + if ( + digest_size > self._max_digest_size or + digest_size < self._min_digest_size + ): + raise ValueError("Digest size must be {0}-{1}".format( + self._min_digest_size, self._max_digest_size) + ) + + self._digest_size = digest_size + + digest_size = utils.read_only_property("_digest_size") + + +@utils.register_interface(HashAlgorithm) +class BLAKE2s(object): + name = "blake2s" + block_size = 64 + _max_digest_size = 32 + _min_digest_size = 1 + + def __init__(self, digest_size): + if ( + digest_size > self._max_digest_size or + digest_size < self._min_digest_size + ): + raise ValueError("Digest size must be {0}-{1}".format( + self._min_digest_size, self._max_digest_size) + ) + + self._digest_size = digest_size + + digest_size = utils.read_only_property("_digest_size") diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/hmac.py 
b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/hmac.py new file mode 100644 index 0000000..2e9a4e2 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/hmac.py @@ -0,0 +1,69 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import HMACBackend +from cryptography.hazmat.primitives import hashes, mac + + +@utils.register_interface(mac.MACContext) +@utils.register_interface(hashes.HashContext) +class HMAC(object): + def __init__(self, key, algorithm, backend, ctx=None): + if not isinstance(backend, HMACBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement HMACBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + if not isinstance(algorithm, hashes.HashAlgorithm): + raise TypeError("Expected instance of hashes.HashAlgorithm.") + self._algorithm = algorithm + + self._backend = backend + self._key = key + if ctx is None: + self._ctx = self._backend.create_hmac_ctx(key, self.algorithm) + else: + self._ctx = ctx + + algorithm = utils.read_only_property("_algorithm") + + def update(self, data): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + if not isinstance(data, bytes): + raise TypeError("data must be bytes.") + self._ctx.update(data) + + def copy(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + return HMAC( + self._key, + self.algorithm, + backend=self._backend, + ctx=self._ctx.copy() + ) + + def finalize(self): + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + digest = self._ctx.finalize() + self._ctx = None + return digest + + def verify(self, signature): + if not isinstance(signature, bytes): + raise TypeError("signature must be bytes.") + if self._ctx is None: + raise AlreadyFinalized("Context was already finalized.") + + ctx, self._ctx = self._ctx, None + ctx.verify(signature) diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/__init__.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/__init__.py new file mode 100644 index 0000000..2d0724e --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/__init__.py @@ -0,0 +1,26 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class KeyDerivationFunction(object): + @abc.abstractmethod + def derive(self, key_material): + """ + Deterministically generates and returns a new key based on the existing + key material. + """ + + @abc.abstractmethod + def verify(self, key_material, expected_key): + """ + Checks whether the key generated by the key material matches the + expected derived key. Raises an exception if they do not match. 
+ """ diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/concatkdf.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/concatkdf.py new file mode 100644 index 0000000..c6399e4 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/concatkdf.py @@ -0,0 +1,125 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import struct + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import HMACBackend +from cryptography.hazmat.backends.interfaces import HashBackend +from cryptography.hazmat.primitives import constant_time, hashes, hmac +from cryptography.hazmat.primitives.kdf import KeyDerivationFunction + + +def _int_to_u32be(n): + return struct.pack('>I', n) + + +def _common_args_checks(algorithm, length, otherinfo): + max_length = algorithm.digest_size * (2 ** 32 - 1) + if length > max_length: + raise ValueError( + "Can not derive keys larger than {0} bits.".format( + max_length + )) + if not (otherinfo is None or isinstance(otherinfo, bytes)): + raise TypeError("otherinfo must be bytes.") + + +def _concatkdf_derive(key_material, length, auxfn, otherinfo): + if not isinstance(key_material, bytes): + raise TypeError("key_material must be bytes.") + + output = [b""] + outlen = 0 + counter = 1 + + while (length > outlen): + h = auxfn() + h.update(_int_to_u32be(counter)) + h.update(key_material) + h.update(otherinfo) + output.append(h.finalize()) + outlen += len(output[-1]) + counter += 1 + + return b"".join(output)[:length] + + +@utils.register_interface(KeyDerivationFunction) +class ConcatKDFHash(object): + def __init__(self, algorithm, length, otherinfo, backend): + + _common_args_checks(algorithm, length, otherinfo) + self._algorithm = algorithm + self._length = length + self._otherinfo = otherinfo + if self._otherinfo is None: + self._otherinfo = b"" + + if not isinstance(backend, HashBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement HashBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + self._backend = backend + self._used = False + + def _hash(self): + return hashes.Hash(self._algorithm, self._backend) + + def derive(self, key_material): + if self._used: + raise AlreadyFinalized + self._used = True + return _concatkdf_derive(key_material, self._length, + self._hash, self._otherinfo) + + def verify(self, key_material, expected_key): + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey + + +@utils.register_interface(KeyDerivationFunction) +class ConcatKDFHMAC(object): + def __init__(self, algorithm, length, salt, otherinfo, backend): + + _common_args_checks(algorithm, length, otherinfo) + self._algorithm = algorithm + self._length = length + self._otherinfo = otherinfo + if self._otherinfo is None: + self._otherinfo = b"" + + if not (salt is None or isinstance(salt, bytes)): + raise TypeError("salt must be bytes.") + if salt is None: + salt = b"\x00" * algorithm.block_size + self._salt = salt + + if not isinstance(backend, HMACBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement HMACBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + self._backend = backend + 
self._used = False + + def _hmac(self): + return hmac.HMAC(self._salt, self._algorithm, self._backend) + + def derive(self, key_material): + if self._used: + raise AlreadyFinalized + self._used = True + return _concatkdf_derive(key_material, self._length, + self._hmac, self._otherinfo) + + def verify(self, key_material, expected_key): + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/hkdf.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/hkdf.py new file mode 100644 index 0000000..917b4e9 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/hkdf.py @@ -0,0 +1,116 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import six + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import HMACBackend +from cryptography.hazmat.primitives import constant_time, hmac +from cryptography.hazmat.primitives.kdf import KeyDerivationFunction + + +@utils.register_interface(KeyDerivationFunction) +class HKDF(object): + def __init__(self, algorithm, length, salt, info, backend): + if not isinstance(backend, HMACBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement HMACBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + self._algorithm = algorithm + + if not (salt is None or isinstance(salt, bytes)): + raise TypeError("salt must be bytes.") + + if salt is None: + salt = b"\x00" * self._algorithm.digest_size + + self._salt = salt + + self._backend = backend + + self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend) + + def _extract(self, key_material): + h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend) + h.update(key_material) + return h.finalize() + + def derive(self, key_material): + if not isinstance(key_material, bytes): + raise TypeError("key_material must be bytes.") + + return self._hkdf_expand.derive(self._extract(key_material)) + + def verify(self, key_material, expected_key): + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey + + +@utils.register_interface(KeyDerivationFunction) +class HKDFExpand(object): + def __init__(self, algorithm, length, info, backend): + if not isinstance(backend, HMACBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement HMACBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + self._algorithm = algorithm + + self._backend = backend + + max_length = 255 * algorithm.digest_size + + if length > max_length: + raise ValueError( + "Can not derive keys larger than {0} octets.".format( + max_length + )) + + self._length = length + + if not (info is None or isinstance(info, bytes)): + raise TypeError("info must be bytes.") + + if info is None: + info = b"" + + self._info = info + + self._used = False + + def _expand(self, key_material): + output = [b""] + counter = 1 + + while self._algorithm.digest_size * (len(output) - 1) < self._length: + h = hmac.HMAC(key_material, self._algorithm, backend=self._backend) + h.update(output[-1]) + h.update(self._info) + h.update(six.int2byte(counter)) + output.append(h.finalize()) + counter 
+= 1 + + return b"".join(output)[:self._length] + + def derive(self, key_material): + if not isinstance(key_material, bytes): + raise TypeError("key_material must be bytes.") + + if self._used: + raise AlreadyFinalized + + self._used = True + return self._expand(key_material) + + def verify(self, key_material, expected_key): + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/kbkdf.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/kbkdf.py new file mode 100644 index 0000000..14de56e --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/kbkdf.py @@ -0,0 +1,148 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from enum import Enum + +from six.moves import range + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import HMACBackend +from cryptography.hazmat.primitives import constant_time, hashes, hmac +from cryptography.hazmat.primitives.kdf import KeyDerivationFunction + + +class Mode(Enum): + CounterMode = "ctr" + + +class CounterLocation(Enum): + BeforeFixed = "before_fixed" + AfterFixed = "after_fixed" + + +@utils.register_interface(KeyDerivationFunction) +class KBKDFHMAC(object): + def __init__(self, algorithm, mode, length, rlen, llen, + location, label, context, fixed, backend): + if not isinstance(backend, HMACBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement HMACBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + if not isinstance(algorithm, hashes.HashAlgorithm): + raise UnsupportedAlgorithm( + "Algorithm supplied is not a supported hash algorithm.", + _Reasons.UNSUPPORTED_HASH + ) + + if not backend.hmac_supported(algorithm): + raise UnsupportedAlgorithm( + "Algorithm supplied is not a supported hmac algorithm.", + _Reasons.UNSUPPORTED_HASH + ) + + if not isinstance(mode, Mode): + raise TypeError("mode must be of type Mode") + + if not isinstance(location, CounterLocation): + raise TypeError("location must be of type CounterLocation") + + if (label or context) and fixed: + raise ValueError("When supplying fixed data, " + "label and context are ignored.") + + if rlen is None or not self._valid_byte_length(rlen): + raise ValueError("rlen must be between 1 and 4") + + if llen is None and fixed is None: + raise ValueError("Please specify an llen") + + if llen is not None and not isinstance(llen, int): + raise TypeError("llen must be an integer") + + if label is None: + label = b'' + + if context is None: + context = b'' + + if (not isinstance(label, bytes) or + not isinstance(context, bytes)): + raise TypeError('label and context must be of type bytes') + + self._algorithm = algorithm + self._mode = mode + self._length = length + self._rlen = rlen + self._llen = llen + self._location = location + self._label = label + self._context = context + self._backend = backend + self._used = False + self._fixed_data = fixed + + def _valid_byte_length(self, value): + if not isinstance(value, int): + raise TypeError('value must be of type int') + + value_bin = utils.int_to_bytes(1, value) + if not 1 <= len(value_bin) <= 4: + return 
False + return True + + def derive(self, key_material): + if self._used: + raise AlreadyFinalized + + if not isinstance(key_material, bytes): + raise TypeError('key_material must be bytes') + self._used = True + + # inverse floor division (equivalent to ceiling) + rounds = -(-self._length // self._algorithm.digest_size) + + output = [b''] + + # For counter mode, the number of iterations shall not be + # larger than 2^r-1, where r <= 32 is the binary length of the counter + # This ensures that the counter values used as an input to the + # PRF will not repeat during a particular call to the KDF function. + r_bin = utils.int_to_bytes(1, self._rlen) + if rounds > pow(2, len(r_bin) * 8) - 1: + raise ValueError('There are too many iterations.') + + for i in range(1, rounds + 1): + h = hmac.HMAC(key_material, self._algorithm, backend=self._backend) + + counter = utils.int_to_bytes(i, self._rlen) + if self._location == CounterLocation.BeforeFixed: + h.update(counter) + + h.update(self._generate_fixed_input()) + + if self._location == CounterLocation.AfterFixed: + h.update(counter) + + output.append(h.finalize()) + + return b''.join(output)[:self._length] + + def _generate_fixed_input(self): + if self._fixed_data and isinstance(self._fixed_data, bytes): + return self._fixed_data + + l_val = utils.int_to_bytes(self._length * 8, self._llen) + + return b"".join([self._label, b"\x00", self._context, l_val]) + + def verify(self, key_material, expected_key): + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/pbkdf2.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/pbkdf2.py new file mode 100644 index 0000000..f8ce7a3 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/pbkdf2.py @@ -0,0 +1,58 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
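For orientation, a minimal usage sketch of the PBKDF2HMAC class added below; the salt, iteration count, and password are illustrative values only, assuming the default OpenSSL backend:

    import os

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=os.urandom(16),
        iterations=100000,
        backend=default_backend(),
    )
    # Instances are single-use; a second derive() raises AlreadyFinalized.
    key = kdf.derive(b"example password")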
+ +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import PBKDF2HMACBackend +from cryptography.hazmat.primitives import constant_time +from cryptography.hazmat.primitives.kdf import KeyDerivationFunction + + +@utils.register_interface(KeyDerivationFunction) +class PBKDF2HMAC(object): + def __init__(self, algorithm, length, salt, iterations, backend): + if not isinstance(backend, PBKDF2HMACBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement PBKDF2HMACBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + if not backend.pbkdf2_hmac_supported(algorithm): + raise UnsupportedAlgorithm( + "{0} is not supported for PBKDF2 by this backend.".format( + algorithm.name), + _Reasons.UNSUPPORTED_HASH + ) + self._used = False + self._algorithm = algorithm + self._length = length + if not isinstance(salt, bytes): + raise TypeError("salt must be bytes.") + self._salt = salt + self._iterations = iterations + self._backend = backend + + def derive(self, key_material): + if self._used: + raise AlreadyFinalized("PBKDF2 instances can only be used once.") + self._used = True + + if not isinstance(key_material, bytes): + raise TypeError("key_material must be bytes.") + return self._backend.derive_pbkdf2_hmac( + self._algorithm, + self._length, + self._salt, + self._iterations, + key_material + ) + + def verify(self, key_material, expected_key): + derived_key = self.derive(key_material) + if not constant_time.bytes_eq(derived_key, expected_key): + raise InvalidKey("Keys do not match.") diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/scrypt.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/scrypt.py new file mode 100644 index 0000000..77dcf9a --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/scrypt.py @@ -0,0 +1,66 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
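A minimal usage sketch of the Scrypt class added below, assuming the default backend was built against an OpenSSL with scrypt support; the cost parameters are illustrative, not recommendations:

    import os

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.kdf.scrypt import Scrypt

    kdf = Scrypt(
        salt=os.urandom(16),
        length=32,
        n=2 ** 14,  # CPU/memory cost; must be a power of 2 greater than 1
        r=8,        # block size parameter
        p=1,        # parallelization parameter
        backend=default_backend(),
    )
    key = kdf.derive(b"example password")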
+ +from __future__ import absolute_import, division, print_function + +import sys + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import ScryptBackend +from cryptography.hazmat.primitives import constant_time +from cryptography.hazmat.primitives.kdf import KeyDerivationFunction + + +# This is used by the scrypt tests to skip tests that require more memory +# than the MEM_LIMIT +_MEM_LIMIT = sys.maxsize // 2 + + +@utils.register_interface(KeyDerivationFunction) +class Scrypt(object): + def __init__(self, salt, length, n, r, p, backend): + if not isinstance(backend, ScryptBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement ScryptBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + self._length = length + if not isinstance(salt, bytes): + raise TypeError("salt must be bytes.") + + if n < 2 or (n & (n - 1)) != 0: + raise ValueError("n must be greater than 1 and be a power of 2.") + + if r < 1: + raise ValueError("r must be greater than or equal to 1.") + + if p < 1: + raise ValueError("p must be greater than or equal to 1.") + + self._used = False + self._salt = salt + self._n = n + self._r = r + self._p = p + self._backend = backend + + def derive(self, key_material): + if self._used: + raise AlreadyFinalized("Scrypt instances can only be used once.") + self._used = True + + if not isinstance(key_material, bytes): + raise TypeError("key_material must be bytes.") + return self._backend.derive_scrypt( + key_material, self._salt, self._length, self._n, self._r, self._p + ) + + def verify(self, key_material, expected_key): + derived_key = self.derive(key_material) + if not constant_time.bytes_eq(derived_key, expected_key): + raise InvalidKey("Keys do not match.") diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/x963kdf.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/x963kdf.py new file mode 100644 index 0000000..83789b3 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/kdf/x963kdf.py @@ -0,0 +1,70 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
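A minimal usage sketch of the X963KDF class added below; the sharedinfo value is an illustrative placeholder:

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.kdf.x963kdf import X963KDF

    xkdf = X963KDF(
        algorithm=hashes.SHA256(),
        length=32,
        sharedinfo=b"example shared info",
        backend=default_backend(),
    )
    # Single-use, like the other KDFs in this changeset.
    key = xkdf.derive(b"input shared secret")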
+ +from __future__ import absolute_import, division, print_function + +import struct + +from cryptography import utils +from cryptography.exceptions import ( + AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import HashBackend +from cryptography.hazmat.primitives import constant_time, hashes +from cryptography.hazmat.primitives.kdf import KeyDerivationFunction + + +def _int_to_u32be(n): + return struct.pack('>I', n) + + +@utils.register_interface(KeyDerivationFunction) +class X963KDF(object): + def __init__(self, algorithm, length, sharedinfo, backend): + + max_len = algorithm.digest_size * (2 ** 32 - 1) + if length > max_len: + raise ValueError( + "Can not derive keys larger than {0} bits.".format(max_len)) + if not (sharedinfo is None or isinstance(sharedinfo, bytes)): + raise TypeError("sharedinfo must be bytes.") + self._algorithm = algorithm + self._length = length + self._sharedinfo = sharedinfo + + if not isinstance(backend, HashBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement HashBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + self._backend = backend + self._used = False + + def derive(self, key_material): + if self._used: + raise AlreadyFinalized + self._used = True + + if not isinstance(key_material, bytes): + raise TypeError("key_material must be bytes.") + + output = [b""] + outlen = 0 + counter = 1 + + while self._length > outlen: + h = hashes.Hash(self._algorithm, self._backend) + h.update(key_material) + h.update(_int_to_u32be(counter)) + if self._sharedinfo is not None: + h.update(self._sharedinfo) + output.append(h.finalize()) + outlen += len(output[-1]) + counter += 1 + + return b"".join(output)[:self._length] + + def verify(self, key_material, expected_key): + if not constant_time.bytes_eq(self.derive(key_material), expected_key): + raise InvalidKey diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/keywrap.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/keywrap.py new file mode 100644 index 0000000..f55c519 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/keywrap.py @@ -0,0 +1,154 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
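A minimal round-trip sketch of the RFC 3394 key-wrap helpers added below; the keys are randomly generated here purely for illustration:

    import os

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.keywrap import aes_key_unwrap, aes_key_wrap

    kek = os.urandom(16)          # key-encryption key: 16, 24, or 32 bytes
    key_to_wrap = os.urandom(32)  # at least 16 bytes and a multiple of 8
    wrapped = aes_key_wrap(kek, key_to_wrap, default_backend())
    assert aes_key_unwrap(kek, wrapped, default_backend()) == key_to_wrap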
+ +from __future__ import absolute_import, division, print_function + +import struct + +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import ECB +from cryptography.hazmat.primitives.constant_time import bytes_eq + + +def _wrap_core(wrapping_key, a, r, backend): + # RFC 3394 Key Wrap - 2.2.1 (index method) + encryptor = Cipher(AES(wrapping_key), ECB(), backend).encryptor() + n = len(r) + for j in range(6): + for i in range(n): + # every encryption operation is a discrete 16 byte chunk (because + # AES has a 128-bit block size) and since we're using ECB it is + # safe to reuse the encryptor for the entire operation + b = encryptor.update(a + r[i]) + # pack/unpack are safe as these are always 64-bit chunks + a = struct.pack( + ">Q", struct.unpack(">Q", b[:8])[0] ^ ((n * j) + i + 1) + ) + r[i] = b[-8:] + + assert encryptor.finalize() == b"" + + return a + b"".join(r) + + +def aes_key_wrap(wrapping_key, key_to_wrap, backend): + if len(wrapping_key) not in [16, 24, 32]: + raise ValueError("The wrapping key must be a valid AES key length") + + if len(key_to_wrap) < 16: + raise ValueError("The key to wrap must be at least 16 bytes") + + if len(key_to_wrap) % 8 != 0: + raise ValueError("The key to wrap must be a multiple of 8 bytes") + + a = b"\xa6\xa6\xa6\xa6\xa6\xa6\xa6\xa6" + r = [key_to_wrap[i:i + 8] for i in range(0, len(key_to_wrap), 8)] + return _wrap_core(wrapping_key, a, r, backend) + + +def _unwrap_core(wrapping_key, a, r, backend): + # Implement RFC 3394 Key Unwrap - 2.2.2 (index method) + decryptor = Cipher(AES(wrapping_key), ECB(), backend).decryptor() + n = len(r) + for j in reversed(range(6)): + for i in reversed(range(n)): + # pack/unpack are safe as these are always 64-bit chunks + atr = struct.pack( + ">Q", struct.unpack(">Q", a)[0] ^ ((n * j) + i + 1) + ) + r[i] + # every decryption operation is a discrete 16 byte chunk so + # it is safe to reuse the decryptor for the entire operation + b = decryptor.update(atr) + a = b[:8] + r[i] = b[-8:] + + assert decryptor.finalize() == b"" + return a, r + + +def aes_key_wrap_with_padding(wrapping_key, key_to_wrap, backend): + if len(wrapping_key) not in [16, 24, 32]: + raise ValueError("The wrapping key must be a valid AES key length") + + aiv = b"\xA6\x59\x59\xA6" + struct.pack(">i", len(key_to_wrap)) + # pad the key to wrap if necessary + pad = (8 - (len(key_to_wrap) % 8)) % 8 + key_to_wrap = key_to_wrap + b"\x00" * pad + if len(key_to_wrap) == 8: + # RFC 5649 - 4.1 - exactly 8 octets after padding + encryptor = Cipher(AES(wrapping_key), ECB(), backend).encryptor() + b = encryptor.update(aiv + key_to_wrap) + assert encryptor.finalize() == b"" + return b + else: + r = [key_to_wrap[i:i + 8] for i in range(0, len(key_to_wrap), 8)] + return _wrap_core(wrapping_key, aiv, r, backend) + + +def aes_key_unwrap_with_padding(wrapping_key, wrapped_key, backend): + if len(wrapped_key) < 16: + raise InvalidUnwrap("Must be at least 16 bytes") + + if len(wrapping_key) not in [16, 24, 32]: + raise ValueError("The wrapping key must be a valid AES key length") + + if len(wrapped_key) == 16: + # RFC 5649 - 4.2 - exactly two 64-bit blocks + decryptor = Cipher(AES(wrapping_key), ECB(), backend).decryptor() + b = decryptor.update(wrapped_key) + assert decryptor.finalize() == b"" + a = b[:8] + data = b[8:] + n = 1 + else: + r = [wrapped_key[i:i + 8] for i in range(0, len(wrapped_key), 8)] + encrypted_aiv = r.pop(0) + n = len(r) + a, r 
= _unwrap_core(wrapping_key, encrypted_aiv, r, backend) + data = b"".join(r) + + # 1) Check that MSB(32,A) = A65959A6. + # 2) Check that 8*(n-1) < LSB(32,A) <= 8*n. If so, let + # MLI = LSB(32,A). + # 3) Let b = (8*n)-MLI, and then check that the rightmost b octets of + # the output data are zero. + (mli,) = struct.unpack(">I", a[4:]) + b = (8 * n) - mli + if ( + not bytes_eq(a[:4], b"\xa6\x59\x59\xa6") or not + 8 * (n - 1) < mli <= 8 * n or ( + b != 0 and not bytes_eq(data[-b:], b"\x00" * b) + ) + ): + raise InvalidUnwrap() + + if b == 0: + return data + else: + return data[:-b] + + +def aes_key_unwrap(wrapping_key, wrapped_key, backend): + if len(wrapped_key) < 24: + raise InvalidUnwrap("Must be at least 24 bytes") + + if len(wrapped_key) % 8 != 0: + raise InvalidUnwrap("The wrapped key must be a multiple of 8 bytes") + + if len(wrapping_key) not in [16, 24, 32]: + raise ValueError("The wrapping key must be a valid AES key length") + + aiv = b"\xa6\xa6\xa6\xa6\xa6\xa6\xa6\xa6" + r = [wrapped_key[i:i + 8] for i in range(0, len(wrapped_key), 8)] + a = r.pop(0) + a, r = _unwrap_core(wrapping_key, a, r, backend) + if not bytes_eq(a, aiv): + raise InvalidUnwrap() + + return b"".join(r) + + +class InvalidUnwrap(Exception): + pass diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/mac.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/mac.py new file mode 100644 index 0000000..4c95190 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/mac.py @@ -0,0 +1,37 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class MACContext(object): + @abc.abstractmethod + def update(self, data): + """ + Processes the provided bytes. + """ + + @abc.abstractmethod + def finalize(self): + """ + Returns the message authentication code as bytes. + """ + + @abc.abstractmethod + def copy(self): + """ + Return a MACContext that is a copy of the current context. + """ + + @abc.abstractmethod + def verify(self, signature): + """ + Checks if the generated message authentication code matches the + signature. + """ diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/padding.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/padding.py new file mode 100644 index 0000000..a081976 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/padding.py @@ -0,0 +1,202 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc + +import six + +from cryptography import utils +from cryptography.exceptions import AlreadyFinalized +from cryptography.hazmat.bindings._padding import lib + + +@six.add_metaclass(abc.ABCMeta) +class PaddingContext(object): + @abc.abstractmethod + def update(self, data): + """ + Pads the provided bytes and returns any available data as bytes. + """ + + @abc.abstractmethod + def finalize(self): + """ + Finalize the padding, returns bytes. 
+ """ + + +def _byte_padding_check(block_size): + if not (0 <= block_size <= 2040): + raise ValueError("block_size must be in range(0, 2041).") + + if block_size % 8 != 0: + raise ValueError("block_size must be a multiple of 8.") + + +def _byte_padding_update(buffer_, data, block_size): + if buffer_ is None: + raise AlreadyFinalized("Context was already finalized.") + + if not isinstance(data, bytes): + raise TypeError("data must be bytes.") + + buffer_ += data + + finished_blocks = len(buffer_) // (block_size // 8) + + result = buffer_[:finished_blocks * (block_size // 8)] + buffer_ = buffer_[finished_blocks * (block_size // 8):] + + return buffer_, result + + +def _byte_padding_pad(buffer_, block_size, paddingfn): + if buffer_ is None: + raise AlreadyFinalized("Context was already finalized.") + + pad_size = block_size // 8 - len(buffer_) + return buffer_ + paddingfn(pad_size) + + +def _byte_unpadding_update(buffer_, data, block_size): + if buffer_ is None: + raise AlreadyFinalized("Context was already finalized.") + + if not isinstance(data, bytes): + raise TypeError("data must be bytes.") + + buffer_ += data + + finished_blocks = max(len(buffer_) // (block_size // 8) - 1, 0) + + result = buffer_[:finished_blocks * (block_size // 8)] + buffer_ = buffer_[finished_blocks * (block_size // 8):] + + return buffer_, result + + +def _byte_unpadding_check(buffer_, block_size, checkfn): + if buffer_ is None: + raise AlreadyFinalized("Context was already finalized.") + + if len(buffer_) != block_size // 8: + raise ValueError("Invalid padding bytes.") + + valid = checkfn(buffer_, block_size // 8) + + if not valid: + raise ValueError("Invalid padding bytes.") + + pad_size = six.indexbytes(buffer_, -1) + return buffer_[:-pad_size] + + +class PKCS7(object): + def __init__(self, block_size): + _byte_padding_check(block_size) + self.block_size = block_size + + def padder(self): + return _PKCS7PaddingContext(self.block_size) + + def unpadder(self): + return _PKCS7UnpaddingContext(self.block_size) + + +@utils.register_interface(PaddingContext) +class _PKCS7PaddingContext(object): + def __init__(self, block_size): + self.block_size = block_size + # TODO: more copies than necessary, we should use zero-buffer (#193) + self._buffer = b"" + + def update(self, data): + self._buffer, result = _byte_padding_update( + self._buffer, data, self.block_size) + return result + + def _padding(self, size): + return six.int2byte(size) * size + + def finalize(self): + result = _byte_padding_pad( + self._buffer, self.block_size, self._padding) + self._buffer = None + return result + + +@utils.register_interface(PaddingContext) +class _PKCS7UnpaddingContext(object): + def __init__(self, block_size): + self.block_size = block_size + # TODO: more copies than necessary, we should use zero-buffer (#193) + self._buffer = b"" + + def update(self, data): + self._buffer, result = _byte_unpadding_update( + self._buffer, data, self.block_size) + return result + + def finalize(self): + result = _byte_unpadding_check( + self._buffer, self.block_size, + lib.Cryptography_check_pkcs7_padding) + self._buffer = None + return result + + +class ANSIX923(object): + def __init__(self, block_size): + _byte_padding_check(block_size) + self.block_size = block_size + + def padder(self): + return _ANSIX923PaddingContext(self.block_size) + + def unpadder(self): + return _ANSIX923UnpaddingContext(self.block_size) + + +@utils.register_interface(PaddingContext) +class _ANSIX923PaddingContext(object): + def __init__(self, block_size): + self.block_size = 
block_size + # TODO: more copies than necessary, we should use zero-buffer (#193) + self._buffer = b"" + + def update(self, data): + self._buffer, result = _byte_padding_update( + self._buffer, data, self.block_size) + return result + + def _padding(self, size): + return six.int2byte(0) * (size - 1) + six.int2byte(size) + + def finalize(self): + result = _byte_padding_pad( + self._buffer, self.block_size, self._padding) + self._buffer = None + return result + + +@utils.register_interface(PaddingContext) +class _ANSIX923UnpaddingContext(object): + def __init__(self, block_size): + self.block_size = block_size + # TODO: more copies than necessary, we should use zero-buffer (#193) + self._buffer = b"" + + def update(self, data): + self._buffer, result = _byte_unpadding_update( + self._buffer, data, self.block_size) + return result + + def finalize(self): + result = _byte_unpadding_check( + self._buffer, self.block_size, + lib.Cryptography_check_ansix923_padding) + self._buffer = None + return result diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/serialization.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/serialization.py new file mode 100644 index 0000000..bd09e6e --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/serialization.py @@ -0,0 +1,209 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +import base64 +import struct +from enum import Enum + +import six + +from cryptography import utils +from cryptography.exceptions import UnsupportedAlgorithm +from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa + + +def load_pem_private_key(data, password, backend): + return backend.load_pem_private_key(data, password) + + +def load_pem_public_key(data, backend): + return backend.load_pem_public_key(data) + + +def load_pem_parameters(data, backend): + return backend.load_pem_parameters(data) + + +def load_der_private_key(data, password, backend): + return backend.load_der_private_key(data, password) + + +def load_der_public_key(data, backend): + return backend.load_der_public_key(data) + + +def load_der_parameters(data, backend): + return backend.load_der_parameters(data) + + +def load_ssh_public_key(data, backend): + key_parts = data.split(b' ', 2) + + if len(key_parts) < 2: + raise ValueError( + 'Key is not in the proper format or contains extra data.') + + key_type = key_parts[0] + + if key_type == b'ssh-rsa': + loader = _load_ssh_rsa_public_key + elif key_type == b'ssh-dss': + loader = _load_ssh_dss_public_key + elif key_type in [ + b'ecdsa-sha2-nistp256', b'ecdsa-sha2-nistp384', b'ecdsa-sha2-nistp521', + ]: + loader = _load_ssh_ecdsa_public_key + else: + raise UnsupportedAlgorithm('Key type is not supported.') + + key_body = key_parts[1] + + try: + decoded_data = base64.b64decode(key_body) + except TypeError: + raise ValueError('Key is not in the proper format.') + + inner_key_type, rest = _ssh_read_next_string(decoded_data) + + if inner_key_type != key_type: + raise ValueError( + 'Key header and key body contain different key type values.' 
+ ) + + return loader(key_type, rest, backend) + + +def _load_ssh_rsa_public_key(key_type, decoded_data, backend): + e, rest = _ssh_read_next_mpint(decoded_data) + n, rest = _ssh_read_next_mpint(rest) + + if rest: + raise ValueError('Key body contains extra bytes.') + + return rsa.RSAPublicNumbers(e, n).public_key(backend) + + +def _load_ssh_dss_public_key(key_type, decoded_data, backend): + p, rest = _ssh_read_next_mpint(decoded_data) + q, rest = _ssh_read_next_mpint(rest) + g, rest = _ssh_read_next_mpint(rest) + y, rest = _ssh_read_next_mpint(rest) + + if rest: + raise ValueError('Key body contains extra bytes.') + + parameter_numbers = dsa.DSAParameterNumbers(p, q, g) + public_numbers = dsa.DSAPublicNumbers(y, parameter_numbers) + + return public_numbers.public_key(backend) + + +def _load_ssh_ecdsa_public_key(expected_key_type, decoded_data, backend): + curve_name, rest = _ssh_read_next_string(decoded_data) + data, rest = _ssh_read_next_string(rest) + + if expected_key_type != b"ecdsa-sha2-" + curve_name: + raise ValueError( + 'Key header and key body contain different key type values.' + ) + + if rest: + raise ValueError('Key body contains extra bytes.') + + curve = { + b"nistp256": ec.SECP256R1, + b"nistp384": ec.SECP384R1, + b"nistp521": ec.SECP521R1, + }[curve_name]() + + if six.indexbytes(data, 0) != 4: + raise NotImplementedError( + "Compressed elliptic curve points are not supported" + ) + + numbers = ec.EllipticCurvePublicNumbers.from_encoded_point(curve, data) + return numbers.public_key(backend) + + +def _ssh_read_next_string(data): + """ + Retrieves the next RFC 4251 string value from the data. + + While the RFC calls these strings, in Python they are bytes objects. + """ + if len(data) < 4: + raise ValueError("Key is not in the proper format") + + str_len, = struct.unpack('>I', data[:4]) + if len(data) < str_len + 4: + raise ValueError("Key is not in the proper format") + + return data[4:4 + str_len], data[4 + str_len:] + + +def _ssh_read_next_mpint(data): + """ + Reads the next mpint from the data. + + Currently, all mpints are interpreted as unsigned. 
+ """ + mpint_data, rest = _ssh_read_next_string(data) + + return ( + utils.int_from_bytes(mpint_data, byteorder='big', signed=False), rest + ) + + +def _ssh_write_string(data): + return struct.pack(">I", len(data)) + data + + +def _ssh_write_mpint(value): + data = utils.int_to_bytes(value) + if six.indexbytes(data, 0) & 0x80: + data = b"\x00" + data + return _ssh_write_string(data) + + +class Encoding(Enum): + PEM = "PEM" + DER = "DER" + OpenSSH = "OpenSSH" + + +class PrivateFormat(Enum): + PKCS8 = "PKCS8" + TraditionalOpenSSL = "TraditionalOpenSSL" + + +class PublicFormat(Enum): + SubjectPublicKeyInfo = "X.509 subjectPublicKeyInfo with PKCS#1" + PKCS1 = "Raw PKCS#1" + OpenSSH = "OpenSSH" + + +class ParameterFormat(Enum): + PKCS3 = "PKCS3" + + +@six.add_metaclass(abc.ABCMeta) +class KeySerializationEncryption(object): + pass + + +@utils.register_interface(KeySerializationEncryption) +class BestAvailableEncryption(object): + def __init__(self, password): + if not isinstance(password, bytes) or len(password) == 0: + raise ValueError("Password must be 1 or more bytes.") + + self.password = password + + +@utils.register_interface(KeySerializationEncryption) +class NoEncryption(object): + pass diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/twofactor/__init__.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/twofactor/__init__.py new file mode 100644 index 0000000..e71f9e6 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/twofactor/__init__.py @@ -0,0 +1,9 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + + +class InvalidToken(Exception): + pass diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/twofactor/hotp.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/twofactor/hotp.py new file mode 100644 index 0000000..4ad1bdc --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/twofactor/hotp.py @@ -0,0 +1,68 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +from __future__ import absolute_import, division, print_function + +import struct + +import six + +from cryptography.exceptions import ( + UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import HMACBackend +from cryptography.hazmat.primitives import constant_time, hmac +from cryptography.hazmat.primitives.hashes import SHA1, SHA256, SHA512 +from cryptography.hazmat.primitives.twofactor import InvalidToken +from cryptography.hazmat.primitives.twofactor.utils import _generate_uri + + +class HOTP(object): + def __init__(self, key, length, algorithm, backend, + enforce_key_length=True): + if not isinstance(backend, HMACBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement HMACBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + if len(key) < 16 and enforce_key_length is True: + raise ValueError("Key length has to be at least 128 bits.") + + if not isinstance(length, six.integer_types): + raise TypeError("Length parameter must be an integer type.") + + if length < 6 or length > 8: + raise ValueError("Length of HOTP has to be between 6 and 8.") + + if not isinstance(algorithm, (SHA1, SHA256, SHA512)): + raise TypeError("Algorithm must be SHA1, SHA256 or SHA512.") + + self._key = key + self._length = length + self._algorithm = algorithm + self._backend = backend + + def generate(self, counter): + truncated_value = self._dynamic_truncate(counter) + hotp = truncated_value % (10 ** self._length) + return "{0:0{1}}".format(hotp, self._length).encode() + + def verify(self, hotp, counter): + if not constant_time.bytes_eq(self.generate(counter), hotp): + raise InvalidToken("Supplied HOTP value does not match.") + + def _dynamic_truncate(self, counter): + ctx = hmac.HMAC(self._key, self._algorithm, self._backend) + ctx.update(struct.pack(">Q", counter)) + hmac_value = ctx.finalize() + + offset = six.indexbytes(hmac_value, len(hmac_value) - 1) & 0b1111 + p = hmac_value[offset:offset + 4] + return struct.unpack(">I", p)[0] & 0x7fffffff + + def get_provisioning_uri(self, account_name, counter, issuer): + return _generate_uri(self, "hotp", account_name, issuer, [ + ("counter", int(counter)), + ]) diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/twofactor/totp.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/twofactor/totp.py new file mode 100644 index 0000000..499f282 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/twofactor/totp.py @@ -0,0 +1,40 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details.
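A minimal usage sketch of the TOTP class added below, assuming a 30-second time step; as above, the key is illustrative:

    import os
    import time

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.hashes import SHA1
    from cryptography.hazmat.primitives.twofactor.totp import TOTP

    key = os.urandom(20)
    totp = TOTP(key, 8, SHA1(), 30, default_backend())
    token = totp.generate(time.time())  # eight digits for the current window
    totp.verify(token, time.time())     # raises InvalidToken on mismatch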
+ +from __future__ import absolute_import, division, print_function + +from cryptography.exceptions import ( + UnsupportedAlgorithm, _Reasons +) +from cryptography.hazmat.backends.interfaces import HMACBackend +from cryptography.hazmat.primitives import constant_time +from cryptography.hazmat.primitives.twofactor import InvalidToken +from cryptography.hazmat.primitives.twofactor.hotp import HOTP +from cryptography.hazmat.primitives.twofactor.utils import _generate_uri + + +class TOTP(object): + def __init__(self, key, length, algorithm, time_step, backend, + enforce_key_length=True): + if not isinstance(backend, HMACBackend): + raise UnsupportedAlgorithm( + "Backend object does not implement HMACBackend.", + _Reasons.BACKEND_MISSING_INTERFACE + ) + + self._time_step = time_step + self._hotp = HOTP(key, length, algorithm, backend, enforce_key_length) + + def generate(self, time): + counter = int(time / self._time_step) + return self._hotp.generate(counter) + + def verify(self, totp, time): + if not constant_time.bytes_eq(self.generate(time), totp): + raise InvalidToken("Supplied TOTP value does not match.") + + def get_provisioning_uri(self, account_name, issuer): + return _generate_uri(self._hotp, "totp", account_name, issuer, [ + ("period", int(self._time_step)), + ]) diff --git a/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/twofactor/utils.py b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/twofactor/utils.py new file mode 100644 index 0000000..0ed8c4c --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/hazmat/primitives/twofactor/utils.py @@ -0,0 +1,30 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import base64 + +from six.moves.urllib.parse import quote, urlencode + + +def _generate_uri(hotp, type_name, account_name, issuer, extra_parameters): + parameters = [ + ("digits", hotp._length), + ("secret", base64.b32encode(hotp._key)), + ("algorithm", hotp._algorithm.name.upper()), + ] + + if issuer is not None: + parameters.append(("issuer", issuer)) + + parameters.extend(extra_parameters) + + uriparts = { + "type": type_name, + "label": ("%s:%s" % (quote(issuer), quote(account_name)) if issuer + else quote(account_name)), + "parameters": urlencode(parameters), + } + return "otpauth://{type}/{label}?{parameters}".format(**uriparts) diff --git a/server/www/packages/packages-windows/x86/cryptography/utils.py b/server/www/packages/packages-windows/x86/cryptography/utils.py new file mode 100644 index 0000000..3d45a77 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/utils.py @@ -0,0 +1,165 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +import binascii +import inspect +import sys +import warnings + + +# We use a UserWarning subclass, instead of DeprecationWarning, because CPython
+# decided deprecation warnings should be invisible by default. +class CryptographyDeprecationWarning(UserWarning): + pass + + +# Several APIs were deprecated with no specific end-of-life date because of the +# ubiquity of their use.
diff --git a/server/www/packages/packages-windows/x86/cryptography/utils.py b/server/www/packages/packages-windows/x86/cryptography/utils.py
new file mode 100644
index 0000000..3d45a77
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/cryptography/utils.py
@@ -0,0 +1,165 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import abc
+import binascii
+import inspect
+import sys
+import warnings
+
+
+# We use a UserWarning subclass, instead of DeprecationWarning, because CPython
+# decided deprecation warnings should be invisible by default.
+class CryptographyDeprecationWarning(UserWarning):
+    pass
+
+
+# Several APIs were deprecated with no specific end-of-life date because of the
+# ubiquity of their use. They should not be removed until we agree on when that
+# cycle ends.
+PersistentlyDeprecated = CryptographyDeprecationWarning
+DeprecatedIn21 = CryptographyDeprecationWarning
+DeprecatedIn23 = CryptographyDeprecationWarning
+
+
+def _check_bytes(name, value):
+    if not isinstance(value, bytes):
+        raise TypeError("{0} must be bytes".format(name))
+
+
+def read_only_property(name):
+    return property(lambda self: getattr(self, name))
+
+
+def register_interface(iface):
+    def register_decorator(klass):
+        verify_interface(iface, klass)
+        iface.register(klass)
+        return klass
+    return register_decorator
+
+
+def register_interface_if(predicate, iface):
+    def register_decorator(klass):
+        if predicate:
+            verify_interface(iface, klass)
+            iface.register(klass)
+        return klass
+    return register_decorator
+
+
+if hasattr(int, "from_bytes"):
+    int_from_bytes = int.from_bytes
+else:
+    def int_from_bytes(data, byteorder, signed=False):
+        assert byteorder == 'big'
+        assert not signed
+
+        return int(binascii.hexlify(data), 16)
+
+
+if hasattr(int, "to_bytes"):
+    def int_to_bytes(integer, length=None):
+        return integer.to_bytes(
+            length or (integer.bit_length() + 7) // 8 or 1, 'big'
+        )
+else:
+    def int_to_bytes(integer, length=None):
+        hex_string = '%x' % integer
+        if length is None:
+            n = len(hex_string)
+        else:
+            n = length * 2
+        return binascii.unhexlify(hex_string.zfill(n + (n & 1)))
+
+
+class InterfaceNotImplemented(Exception):
+    pass
+
+
+if hasattr(inspect, "signature"):
+    signature = inspect.signature
+else:
+    signature = inspect.getargspec
+
+
+def verify_interface(iface, klass):
+    for method in iface.__abstractmethods__:
+        if not hasattr(klass, method):
+            raise InterfaceNotImplemented(
+                "{0} is missing a {1!r} method".format(klass, method)
+            )
+        if isinstance(getattr(iface, method), abc.abstractproperty):
+            # Can't properly verify these yet.
+            continue
+        sig = signature(getattr(iface, method))
+        actual = signature(getattr(klass, method))
+        if sig != actual:
+            raise InterfaceNotImplemented(
+                "{0}.{1}'s signature differs from the expected. Expected: "
+                "{2!r}. Received: {3!r}".format(
+                    klass, method, sig, actual
+                )
+            )
+
+
+# No longer needed as of 2.2, but retained because we have external consumers
+# who use it.
+def bit_length(x): + return x.bit_length() + + +class _DeprecatedValue(object): + def __init__(self, value, message, warning_class): + self.value = value + self.message = message + self.warning_class = warning_class + + +class _ModuleWithDeprecations(object): + def __init__(self, module): + self.__dict__["_module"] = module + + def __getattr__(self, attr): + obj = getattr(self._module, attr) + if isinstance(obj, _DeprecatedValue): + warnings.warn(obj.message, obj.warning_class, stacklevel=2) + obj = obj.value + return obj + + def __setattr__(self, attr, value): + setattr(self._module, attr, value) + + def __delattr__(self, attr): + obj = getattr(self._module, attr) + if isinstance(obj, _DeprecatedValue): + warnings.warn(obj.message, obj.warning_class, stacklevel=2) + + delattr(self._module, attr) + + def __dir__(self): + return ["_module"] + dir(self._module) + + +def deprecated(value, module_name, message, warning_class): + module = sys.modules[module_name] + if not isinstance(module, _ModuleWithDeprecations): + sys.modules[module_name] = _ModuleWithDeprecations(module) + return _DeprecatedValue(value, message, warning_class) + + +def cached_property(func): + cached_name = "_cached_{0}".format(func) + sentinel = object() + + def inner(instance): + cache = getattr(instance, cached_name, sentinel) + if cache is not sentinel: + return cache + result = func(instance) + setattr(instance, cached_name, result) + return result + return property(inner) diff --git a/server/www/packages/packages-windows/x86/cryptography/x509/__init__.py b/server/www/packages/packages-windows/x86/cryptography/x509/__init__.py new file mode 100644 index 0000000..d2f9b04 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/x509/__init__.py @@ -0,0 +1,185 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +from __future__ import absolute_import, division, print_function + +from cryptography.x509 import certificate_transparency +from cryptography.x509.base import ( + Certificate, CertificateBuilder, CertificateRevocationList, + CertificateRevocationListBuilder, + CertificateSigningRequest, CertificateSigningRequestBuilder, + InvalidVersion, RevokedCertificate, RevokedCertificateBuilder, + Version, load_der_x509_certificate, load_der_x509_crl, load_der_x509_csr, + load_pem_x509_certificate, load_pem_x509_crl, load_pem_x509_csr, + random_serial_number, +) +from cryptography.x509.extensions import ( + AccessDescription, AuthorityInformationAccess, + AuthorityKeyIdentifier, BasicConstraints, CRLDistributionPoints, + CRLNumber, CRLReason, CertificateIssuer, CertificatePolicies, + DeltaCRLIndicator, DistributionPoint, DuplicateExtension, ExtendedKeyUsage, + Extension, ExtensionNotFound, ExtensionType, Extensions, FreshestCRL, + GeneralNames, InhibitAnyPolicy, InvalidityDate, IssuerAlternativeName, + KeyUsage, NameConstraints, NoticeReference, OCSPNoCheck, PolicyConstraints, + PolicyInformation, PrecertificateSignedCertificateTimestamps, ReasonFlags, + SubjectAlternativeName, SubjectKeyIdentifier, TLSFeature, TLSFeatureType, + UnrecognizedExtension, UserNotice +) +from cryptography.x509.general_name import ( + DNSName, DirectoryName, GeneralName, IPAddress, OtherName, RFC822Name, + RegisteredID, UniformResourceIdentifier, UnsupportedGeneralNameType, + _GENERAL_NAMES +) +from cryptography.x509.name import ( + Name, NameAttribute, RelativeDistinguishedName +) +from cryptography.x509.oid import ( + AuthorityInformationAccessOID, CRLEntryExtensionOID, + CertificatePoliciesOID, ExtendedKeyUsageOID, ExtensionOID, NameOID, + ObjectIdentifier, SignatureAlgorithmOID, _SIG_OIDS_TO_HASH +) + + +OID_AUTHORITY_INFORMATION_ACCESS = ExtensionOID.AUTHORITY_INFORMATION_ACCESS +OID_AUTHORITY_KEY_IDENTIFIER = ExtensionOID.AUTHORITY_KEY_IDENTIFIER +OID_BASIC_CONSTRAINTS = ExtensionOID.BASIC_CONSTRAINTS +OID_CERTIFICATE_POLICIES = ExtensionOID.CERTIFICATE_POLICIES +OID_CRL_DISTRIBUTION_POINTS = ExtensionOID.CRL_DISTRIBUTION_POINTS +OID_EXTENDED_KEY_USAGE = ExtensionOID.EXTENDED_KEY_USAGE +OID_FRESHEST_CRL = ExtensionOID.FRESHEST_CRL +OID_INHIBIT_ANY_POLICY = ExtensionOID.INHIBIT_ANY_POLICY +OID_ISSUER_ALTERNATIVE_NAME = ExtensionOID.ISSUER_ALTERNATIVE_NAME +OID_KEY_USAGE = ExtensionOID.KEY_USAGE +OID_NAME_CONSTRAINTS = ExtensionOID.NAME_CONSTRAINTS +OID_OCSP_NO_CHECK = ExtensionOID.OCSP_NO_CHECK +OID_POLICY_CONSTRAINTS = ExtensionOID.POLICY_CONSTRAINTS +OID_POLICY_MAPPINGS = ExtensionOID.POLICY_MAPPINGS +OID_SUBJECT_ALTERNATIVE_NAME = ExtensionOID.SUBJECT_ALTERNATIVE_NAME +OID_SUBJECT_DIRECTORY_ATTRIBUTES = ExtensionOID.SUBJECT_DIRECTORY_ATTRIBUTES +OID_SUBJECT_INFORMATION_ACCESS = ExtensionOID.SUBJECT_INFORMATION_ACCESS +OID_SUBJECT_KEY_IDENTIFIER = ExtensionOID.SUBJECT_KEY_IDENTIFIER + +OID_DSA_WITH_SHA1 = SignatureAlgorithmOID.DSA_WITH_SHA1 +OID_DSA_WITH_SHA224 = SignatureAlgorithmOID.DSA_WITH_SHA224 +OID_DSA_WITH_SHA256 = SignatureAlgorithmOID.DSA_WITH_SHA256 +OID_ECDSA_WITH_SHA1 = SignatureAlgorithmOID.ECDSA_WITH_SHA1 +OID_ECDSA_WITH_SHA224 = SignatureAlgorithmOID.ECDSA_WITH_SHA224 +OID_ECDSA_WITH_SHA256 = SignatureAlgorithmOID.ECDSA_WITH_SHA256 +OID_ECDSA_WITH_SHA384 = SignatureAlgorithmOID.ECDSA_WITH_SHA384 +OID_ECDSA_WITH_SHA512 = SignatureAlgorithmOID.ECDSA_WITH_SHA512 +OID_RSA_WITH_MD5 = SignatureAlgorithmOID.RSA_WITH_MD5 +OID_RSA_WITH_SHA1 = SignatureAlgorithmOID.RSA_WITH_SHA1 +OID_RSA_WITH_SHA224 = 
SignatureAlgorithmOID.RSA_WITH_SHA224 +OID_RSA_WITH_SHA256 = SignatureAlgorithmOID.RSA_WITH_SHA256 +OID_RSA_WITH_SHA384 = SignatureAlgorithmOID.RSA_WITH_SHA384 +OID_RSA_WITH_SHA512 = SignatureAlgorithmOID.RSA_WITH_SHA512 +OID_RSASSA_PSS = SignatureAlgorithmOID.RSASSA_PSS + +OID_COMMON_NAME = NameOID.COMMON_NAME +OID_COUNTRY_NAME = NameOID.COUNTRY_NAME +OID_DOMAIN_COMPONENT = NameOID.DOMAIN_COMPONENT +OID_DN_QUALIFIER = NameOID.DN_QUALIFIER +OID_EMAIL_ADDRESS = NameOID.EMAIL_ADDRESS +OID_GENERATION_QUALIFIER = NameOID.GENERATION_QUALIFIER +OID_GIVEN_NAME = NameOID.GIVEN_NAME +OID_LOCALITY_NAME = NameOID.LOCALITY_NAME +OID_ORGANIZATIONAL_UNIT_NAME = NameOID.ORGANIZATIONAL_UNIT_NAME +OID_ORGANIZATION_NAME = NameOID.ORGANIZATION_NAME +OID_PSEUDONYM = NameOID.PSEUDONYM +OID_SERIAL_NUMBER = NameOID.SERIAL_NUMBER +OID_STATE_OR_PROVINCE_NAME = NameOID.STATE_OR_PROVINCE_NAME +OID_SURNAME = NameOID.SURNAME +OID_TITLE = NameOID.TITLE + +OID_CLIENT_AUTH = ExtendedKeyUsageOID.CLIENT_AUTH +OID_CODE_SIGNING = ExtendedKeyUsageOID.CODE_SIGNING +OID_EMAIL_PROTECTION = ExtendedKeyUsageOID.EMAIL_PROTECTION +OID_OCSP_SIGNING = ExtendedKeyUsageOID.OCSP_SIGNING +OID_SERVER_AUTH = ExtendedKeyUsageOID.SERVER_AUTH +OID_TIME_STAMPING = ExtendedKeyUsageOID.TIME_STAMPING + +OID_ANY_POLICY = CertificatePoliciesOID.ANY_POLICY +OID_CPS_QUALIFIER = CertificatePoliciesOID.CPS_QUALIFIER +OID_CPS_USER_NOTICE = CertificatePoliciesOID.CPS_USER_NOTICE + +OID_CERTIFICATE_ISSUER = CRLEntryExtensionOID.CERTIFICATE_ISSUER +OID_CRL_REASON = CRLEntryExtensionOID.CRL_REASON +OID_INVALIDITY_DATE = CRLEntryExtensionOID.INVALIDITY_DATE + +OID_CA_ISSUERS = AuthorityInformationAccessOID.CA_ISSUERS +OID_OCSP = AuthorityInformationAccessOID.OCSP + +__all__ = [ + "certificate_transparency", + "load_pem_x509_certificate", + "load_der_x509_certificate", + "load_pem_x509_csr", + "load_der_x509_csr", + "load_pem_x509_crl", + "load_der_x509_crl", + "random_serial_number", + "InvalidVersion", + "DeltaCRLIndicator", + "DuplicateExtension", + "ExtensionNotFound", + "UnsupportedGeneralNameType", + "NameAttribute", + "Name", + "RelativeDistinguishedName", + "ObjectIdentifier", + "ExtensionType", + "Extensions", + "Extension", + "ExtendedKeyUsage", + "FreshestCRL", + "TLSFeature", + "TLSFeatureType", + "OCSPNoCheck", + "BasicConstraints", + "CRLNumber", + "KeyUsage", + "AuthorityInformationAccess", + "AccessDescription", + "CertificatePolicies", + "PolicyInformation", + "UserNotice", + "NoticeReference", + "SubjectKeyIdentifier", + "NameConstraints", + "CRLDistributionPoints", + "DistributionPoint", + "ReasonFlags", + "InhibitAnyPolicy", + "SubjectAlternativeName", + "IssuerAlternativeName", + "AuthorityKeyIdentifier", + "GeneralNames", + "GeneralName", + "RFC822Name", + "DNSName", + "UniformResourceIdentifier", + "RegisteredID", + "DirectoryName", + "IPAddress", + "OtherName", + "Certificate", + "CertificateRevocationList", + "CertificateRevocationListBuilder", + "CertificateSigningRequest", + "RevokedCertificate", + "RevokedCertificateBuilder", + "CertificateSigningRequestBuilder", + "CertificateBuilder", + "Version", + "_SIG_OIDS_TO_HASH", + "OID_CA_ISSUERS", + "OID_OCSP", + "_GENERAL_NAMES", + "CertificateIssuer", + "CRLReason", + "InvalidityDate", + "UnrecognizedExtension", + "PolicyConstraints", + "PrecertificateSignedCertificateTimestamps", +] diff --git a/server/www/packages/packages-windows/x86/cryptography/x509/base.py b/server/www/packages/packages-windows/x86/cryptography/x509/base.py new file mode 100644 index 0000000..b14499c --- /dev/null 
+++ b/server/www/packages/packages-windows/x86/cryptography/x509/base.py @@ -0,0 +1,743 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +import datetime +import os +from enum import Enum + +import six + +from cryptography import utils +from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa +from cryptography.x509.extensions import Extension, ExtensionType +from cryptography.x509.name import Name + + +_UNIX_EPOCH = datetime.datetime(1970, 1, 1) + + +def _convert_to_naive_utc_time(time): + """Normalizes a datetime to a naive datetime in UTC. + + time -- datetime to normalize. Assumed to be in UTC if not timezone + aware. + """ + if time.tzinfo is not None: + offset = time.utcoffset() + offset = offset if offset else datetime.timedelta() + return time.replace(tzinfo=None) - offset + else: + return time + + +class Version(Enum): + v1 = 0 + v3 = 2 + + +def load_pem_x509_certificate(data, backend): + return backend.load_pem_x509_certificate(data) + + +def load_der_x509_certificate(data, backend): + return backend.load_der_x509_certificate(data) + + +def load_pem_x509_csr(data, backend): + return backend.load_pem_x509_csr(data) + + +def load_der_x509_csr(data, backend): + return backend.load_der_x509_csr(data) + + +def load_pem_x509_crl(data, backend): + return backend.load_pem_x509_crl(data) + + +def load_der_x509_crl(data, backend): + return backend.load_der_x509_crl(data) + + +class InvalidVersion(Exception): + def __init__(self, msg, parsed_version): + super(InvalidVersion, self).__init__(msg) + self.parsed_version = parsed_version + + +@six.add_metaclass(abc.ABCMeta) +class Certificate(object): + @abc.abstractmethod + def fingerprint(self, algorithm): + """ + Returns bytes using digest passed. + """ + + @abc.abstractproperty + def serial_number(self): + """ + Returns certificate serial number + """ + + @abc.abstractproperty + def version(self): + """ + Returns the certificate version + """ + + @abc.abstractmethod + def public_key(self): + """ + Returns the public key + """ + + @abc.abstractproperty + def not_valid_before(self): + """ + Not before time (represented as UTC datetime) + """ + + @abc.abstractproperty + def not_valid_after(self): + """ + Not after time (represented as UTC datetime) + """ + + @abc.abstractproperty + def issuer(self): + """ + Returns the issuer name object. + """ + + @abc.abstractproperty + def subject(self): + """ + Returns the subject name object. + """ + + @abc.abstractproperty + def signature_hash_algorithm(self): + """ + Returns a HashAlgorithm corresponding to the type of the digest signed + in the certificate. + """ + + @abc.abstractproperty + def signature_algorithm_oid(self): + """ + Returns the ObjectIdentifier of the signature algorithm. + """ + + @abc.abstractproperty + def extensions(self): + """ + Returns an Extensions object. + """ + + @abc.abstractproperty + def signature(self): + """ + Returns the signature bytes. + """ + + @abc.abstractproperty + def tbs_certificate_bytes(self): + """ + Returns the tbsCertificate payload bytes as defined in RFC 5280. + """ + + @abc.abstractmethod + def __eq__(self, other): + """ + Checks equality. + """ + + @abc.abstractmethod + def __ne__(self, other): + """ + Checks not equal. + """ + + @abc.abstractmethod + def __hash__(self): + """ + Computes a hash. 
+ """ + + @abc.abstractmethod + def public_bytes(self, encoding): + """ + Serializes the certificate to PEM or DER format. + """ + + +@six.add_metaclass(abc.ABCMeta) +class CertificateRevocationList(object): + @abc.abstractmethod + def public_bytes(self, encoding): + """ + Serializes the CRL to PEM or DER format. + """ + + @abc.abstractmethod + def fingerprint(self, algorithm): + """ + Returns bytes using digest passed. + """ + + @abc.abstractmethod + def get_revoked_certificate_by_serial_number(self, serial_number): + """ + Returns an instance of RevokedCertificate or None if the serial_number + is not in the CRL. + """ + + @abc.abstractproperty + def signature_hash_algorithm(self): + """ + Returns a HashAlgorithm corresponding to the type of the digest signed + in the certificate. + """ + + @abc.abstractproperty + def signature_algorithm_oid(self): + """ + Returns the ObjectIdentifier of the signature algorithm. + """ + + @abc.abstractproperty + def issuer(self): + """ + Returns the X509Name with the issuer of this CRL. + """ + + @abc.abstractproperty + def next_update(self): + """ + Returns the date of next update for this CRL. + """ + + @abc.abstractproperty + def last_update(self): + """ + Returns the date of last update for this CRL. + """ + + @abc.abstractproperty + def extensions(self): + """ + Returns an Extensions object containing a list of CRL extensions. + """ + + @abc.abstractproperty + def signature(self): + """ + Returns the signature bytes. + """ + + @abc.abstractproperty + def tbs_certlist_bytes(self): + """ + Returns the tbsCertList payload bytes as defined in RFC 5280. + """ + + @abc.abstractmethod + def __eq__(self, other): + """ + Checks equality. + """ + + @abc.abstractmethod + def __ne__(self, other): + """ + Checks not equal. + """ + + @abc.abstractmethod + def is_signature_valid(self, public_key): + """ + Verifies signature of revocation list against given public key. + """ + + +@six.add_metaclass(abc.ABCMeta) +class CertificateSigningRequest(object): + @abc.abstractmethod + def __eq__(self, other): + """ + Checks equality. + """ + + @abc.abstractmethod + def __ne__(self, other): + """ + Checks not equal. + """ + + @abc.abstractmethod + def __hash__(self): + """ + Computes a hash. + """ + + @abc.abstractmethod + def public_key(self): + """ + Returns the public key + """ + + @abc.abstractproperty + def subject(self): + """ + Returns the subject name object. + """ + + @abc.abstractproperty + def signature_hash_algorithm(self): + """ + Returns a HashAlgorithm corresponding to the type of the digest signed + in the certificate. + """ + + @abc.abstractproperty + def signature_algorithm_oid(self): + """ + Returns the ObjectIdentifier of the signature algorithm. + """ + + @abc.abstractproperty + def extensions(self): + """ + Returns the extensions in the signing request. + """ + + @abc.abstractmethod + def public_bytes(self, encoding): + """ + Encodes the request to PEM or DER format. + """ + + @abc.abstractproperty + def signature(self): + """ + Returns the signature bytes. + """ + + @abc.abstractproperty + def tbs_certrequest_bytes(self): + """ + Returns the PKCS#10 CertificationRequestInfo bytes as defined in RFC + 2986. + """ + + @abc.abstractproperty + def is_signature_valid(self): + """ + Verifies signature of signing request. + """ + + +@six.add_metaclass(abc.ABCMeta) +class RevokedCertificate(object): + @abc.abstractproperty + def serial_number(self): + """ + Returns the serial number of the revoked certificate. 
+ """ + + @abc.abstractproperty + def revocation_date(self): + """ + Returns the date of when this certificate was revoked. + """ + + @abc.abstractproperty + def extensions(self): + """ + Returns an Extensions object containing a list of Revoked extensions. + """ + + +class CertificateSigningRequestBuilder(object): + def __init__(self, subject_name=None, extensions=[]): + """ + Creates an empty X.509 certificate request (v1). + """ + self._subject_name = subject_name + self._extensions = extensions + + def subject_name(self, name): + """ + Sets the certificate requestor's distinguished name. + """ + if not isinstance(name, Name): + raise TypeError('Expecting x509.Name object.') + if self._subject_name is not None: + raise ValueError('The subject name may only be set once.') + return CertificateSigningRequestBuilder(name, self._extensions) + + def add_extension(self, extension, critical): + """ + Adds an X.509 extension to the certificate request. + """ + if not isinstance(extension, ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = Extension(extension.oid, critical, extension) + + # TODO: This is quadratic in the number of extensions + for e in self._extensions: + if e.oid == extension.oid: + raise ValueError('This extension has already been set.') + return CertificateSigningRequestBuilder( + self._subject_name, self._extensions + [extension] + ) + + def sign(self, private_key, algorithm, backend): + """ + Signs the request using the requestor's private key. + """ + if self._subject_name is None: + raise ValueError("A CertificateSigningRequest must have a subject") + return backend.create_x509_csr(self, private_key, algorithm) + + +class CertificateBuilder(object): + def __init__(self, issuer_name=None, subject_name=None, + public_key=None, serial_number=None, not_valid_before=None, + not_valid_after=None, extensions=[]): + self._version = Version.v3 + self._issuer_name = issuer_name + self._subject_name = subject_name + self._public_key = public_key + self._serial_number = serial_number + self._not_valid_before = not_valid_before + self._not_valid_after = not_valid_after + self._extensions = extensions + + def issuer_name(self, name): + """ + Sets the CA's distinguished name. + """ + if not isinstance(name, Name): + raise TypeError('Expecting x509.Name object.') + if self._issuer_name is not None: + raise ValueError('The issuer name may only be set once.') + return CertificateBuilder( + name, self._subject_name, self._public_key, + self._serial_number, self._not_valid_before, + self._not_valid_after, self._extensions + ) + + def subject_name(self, name): + """ + Sets the requestor's distinguished name. + """ + if not isinstance(name, Name): + raise TypeError('Expecting x509.Name object.') + if self._subject_name is not None: + raise ValueError('The subject name may only be set once.') + return CertificateBuilder( + self._issuer_name, name, self._public_key, + self._serial_number, self._not_valid_before, + self._not_valid_after, self._extensions + ) + + def public_key(self, key): + """ + Sets the requestor's public key (as found in the signing request). 
+ """ + if not isinstance(key, (dsa.DSAPublicKey, rsa.RSAPublicKey, + ec.EllipticCurvePublicKey)): + raise TypeError('Expecting one of DSAPublicKey, RSAPublicKey,' + ' or EllipticCurvePublicKey.') + if self._public_key is not None: + raise ValueError('The public key may only be set once.') + return CertificateBuilder( + self._issuer_name, self._subject_name, key, + self._serial_number, self._not_valid_before, + self._not_valid_after, self._extensions + ) + + def serial_number(self, number): + """ + Sets the certificate serial number. + """ + if not isinstance(number, six.integer_types): + raise TypeError('Serial number must be of integral type.') + if self._serial_number is not None: + raise ValueError('The serial number may only be set once.') + if number <= 0: + raise ValueError('The serial number should be positive.') + + # ASN.1 integers are always signed, so most significant bit must be + # zero. + if number.bit_length() >= 160: # As defined in RFC 5280 + raise ValueError('The serial number should not be more than 159 ' + 'bits.') + return CertificateBuilder( + self._issuer_name, self._subject_name, + self._public_key, number, self._not_valid_before, + self._not_valid_after, self._extensions + ) + + def not_valid_before(self, time): + """ + Sets the certificate activation time. + """ + if not isinstance(time, datetime.datetime): + raise TypeError('Expecting datetime object.') + if self._not_valid_before is not None: + raise ValueError('The not valid before may only be set once.') + time = _convert_to_naive_utc_time(time) + if time <= _UNIX_EPOCH: + raise ValueError('The not valid before date must be after the unix' + ' epoch (1970 January 1).') + if self._not_valid_after is not None and time > self._not_valid_after: + raise ValueError( + 'The not valid before date must be before the not valid after ' + 'date.' + ) + return CertificateBuilder( + self._issuer_name, self._subject_name, + self._public_key, self._serial_number, time, + self._not_valid_after, self._extensions + ) + + def not_valid_after(self, time): + """ + Sets the certificate expiration time. + """ + if not isinstance(time, datetime.datetime): + raise TypeError('Expecting datetime object.') + if self._not_valid_after is not None: + raise ValueError('The not valid after may only be set once.') + time = _convert_to_naive_utc_time(time) + if time <= _UNIX_EPOCH: + raise ValueError('The not valid after date must be after the unix' + ' epoch (1970 January 1).') + if (self._not_valid_before is not None and + time < self._not_valid_before): + raise ValueError( + 'The not valid after date must be after the not valid before ' + 'date.' + ) + return CertificateBuilder( + self._issuer_name, self._subject_name, + self._public_key, self._serial_number, self._not_valid_before, + time, self._extensions + ) + + def add_extension(self, extension, critical): + """ + Adds an X.509 extension to the certificate. 
+ """ + if not isinstance(extension, ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = Extension(extension.oid, critical, extension) + + # TODO: This is quadratic in the number of extensions + for e in self._extensions: + if e.oid == extension.oid: + raise ValueError('This extension has already been set.') + + return CertificateBuilder( + self._issuer_name, self._subject_name, + self._public_key, self._serial_number, self._not_valid_before, + self._not_valid_after, self._extensions + [extension] + ) + + def sign(self, private_key, algorithm, backend): + """ + Signs the certificate using the CA's private key. + """ + if self._subject_name is None: + raise ValueError("A certificate must have a subject name") + + if self._issuer_name is None: + raise ValueError("A certificate must have an issuer name") + + if self._serial_number is None: + raise ValueError("A certificate must have a serial number") + + if self._not_valid_before is None: + raise ValueError("A certificate must have a not valid before time") + + if self._not_valid_after is None: + raise ValueError("A certificate must have a not valid after time") + + if self._public_key is None: + raise ValueError("A certificate must have a public key") + + return backend.create_x509_certificate(self, private_key, algorithm) + + +class CertificateRevocationListBuilder(object): + def __init__(self, issuer_name=None, last_update=None, next_update=None, + extensions=[], revoked_certificates=[]): + self._issuer_name = issuer_name + self._last_update = last_update + self._next_update = next_update + self._extensions = extensions + self._revoked_certificates = revoked_certificates + + def issuer_name(self, issuer_name): + if not isinstance(issuer_name, Name): + raise TypeError('Expecting x509.Name object.') + if self._issuer_name is not None: + raise ValueError('The issuer name may only be set once.') + return CertificateRevocationListBuilder( + issuer_name, self._last_update, self._next_update, + self._extensions, self._revoked_certificates + ) + + def last_update(self, last_update): + if not isinstance(last_update, datetime.datetime): + raise TypeError('Expecting datetime object.') + if self._last_update is not None: + raise ValueError('Last update may only be set once.') + last_update = _convert_to_naive_utc_time(last_update) + if last_update <= _UNIX_EPOCH: + raise ValueError('The last update date must be after the unix' + ' epoch (1970 January 1).') + if self._next_update is not None and last_update > self._next_update: + raise ValueError( + 'The last update date must be before the next update date.' + ) + return CertificateRevocationListBuilder( + self._issuer_name, last_update, self._next_update, + self._extensions, self._revoked_certificates + ) + + def next_update(self, next_update): + if not isinstance(next_update, datetime.datetime): + raise TypeError('Expecting datetime object.') + if self._next_update is not None: + raise ValueError('Last update may only be set once.') + next_update = _convert_to_naive_utc_time(next_update) + if next_update <= _UNIX_EPOCH: + raise ValueError('The last update date must be after the unix' + ' epoch (1970 January 1).') + if self._last_update is not None and next_update < self._last_update: + raise ValueError( + 'The next update date must be after the last update date.' 
+ ) + return CertificateRevocationListBuilder( + self._issuer_name, self._last_update, next_update, + self._extensions, self._revoked_certificates + ) + + def add_extension(self, extension, critical): + """ + Adds an X.509 extension to the certificate revocation list. + """ + if not isinstance(extension, ExtensionType): + raise TypeError("extension must be an ExtensionType") + + extension = Extension(extension.oid, critical, extension) + + # TODO: This is quadratic in the number of extensions + for e in self._extensions: + if e.oid == extension.oid: + raise ValueError('This extension has already been set.') + return CertificateRevocationListBuilder( + self._issuer_name, self._last_update, self._next_update, + self._extensions + [extension], self._revoked_certificates + ) + + def add_revoked_certificate(self, revoked_certificate): + """ + Adds a revoked certificate to the CRL. + """ + if not isinstance(revoked_certificate, RevokedCertificate): + raise TypeError("Must be an instance of RevokedCertificate") + + return CertificateRevocationListBuilder( + self._issuer_name, self._last_update, + self._next_update, self._extensions, + self._revoked_certificates + [revoked_certificate] + ) + + def sign(self, private_key, algorithm, backend): + if self._issuer_name is None: + raise ValueError("A CRL must have an issuer name") + + if self._last_update is None: + raise ValueError("A CRL must have a last update time") + + if self._next_update is None: + raise ValueError("A CRL must have a next update time") + + return backend.create_x509_crl(self, private_key, algorithm) + + +class RevokedCertificateBuilder(object): + def __init__(self, serial_number=None, revocation_date=None, + extensions=[]): + self._serial_number = serial_number + self._revocation_date = revocation_date + self._extensions = extensions + + def serial_number(self, number): + if not isinstance(number, six.integer_types): + raise TypeError('Serial number must be of integral type.') + if self._serial_number is not None: + raise ValueError('The serial number may only be set once.') + if number <= 0: + raise ValueError('The serial number should be positive') + + # ASN.1 integers are always signed, so most significant bit must be + # zero. 
+        if number.bit_length() >= 160:  # As defined in RFC 5280
+            raise ValueError('The serial number should not be more than 159 '
+                             'bits.')
+        return RevokedCertificateBuilder(
+            number, self._revocation_date, self._extensions
+        )
+
+    def revocation_date(self, time):
+        if not isinstance(time, datetime.datetime):
+            raise TypeError('Expecting datetime object.')
+        if self._revocation_date is not None:
+            raise ValueError('The revocation date may only be set once.')
+        time = _convert_to_naive_utc_time(time)
+        if time <= _UNIX_EPOCH:
+            raise ValueError('The revocation date must be after the unix'
+                             ' epoch (1970 January 1).')
+        return RevokedCertificateBuilder(
+            self._serial_number, time, self._extensions
+        )
+
+    def add_extension(self, extension, critical):
+        if not isinstance(extension, ExtensionType):
+            raise TypeError("extension must be an ExtensionType")
+
+        extension = Extension(extension.oid, critical, extension)
+
+        # TODO: This is quadratic in the number of extensions
+        for e in self._extensions:
+            if e.oid == extension.oid:
+                raise ValueError('This extension has already been set.')
+        return RevokedCertificateBuilder(
+            self._serial_number, self._revocation_date,
+            self._extensions + [extension]
+        )
+
+    def build(self, backend):
+        if self._serial_number is None:
+            raise ValueError("A revoked certificate must have a serial number")
+        if self._revocation_date is None:
+            raise ValueError(
+                "A revoked certificate must have a revocation date"
+            )
+
+        return backend.create_x509_revoked_certificate(self)
+
+
+def random_serial_number():
+    return utils.int_from_bytes(os.urandom(20), "big") >> 1
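The builders in base.py are immutable: each setter returns a new builder, and sign() hands the completed builder to the backend. A sketch of a self-signed certificate using these APIs (the key generation and one-year validity window are illustrative choices, not taken from this patch):

```python
import datetime

from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID

key = rsa.generate_private_key(65537, 2048, default_backend())
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u"example.com")])
now = datetime.datetime.utcnow()

cert = (
    x509.CertificateBuilder()
    .subject_name(name)
    .issuer_name(name)                           # self-signed: issuer == subject
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())  # 159-bit random serial, per above
    .not_valid_before(now)
    .not_valid_after(now + datetime.timedelta(days=365))
    .add_extension(x509.BasicConstraints(ca=False, path_length=None),
                   critical=True)
    .sign(key, hashes.SHA256(), default_backend())
)
```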
diff --git a/server/www/packages/packages-windows/x86/cryptography/x509/certificate_transparency.py b/server/www/packages/packages-windows/x86/cryptography/x509/certificate_transparency.py
new file mode 100644
index 0000000..d00fe81
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/cryptography/x509/certificate_transparency.py
@@ -0,0 +1,46 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import abc
+from enum import Enum
+
+import six
+
+
+class LogEntryType(Enum):
+    X509_CERTIFICATE = 0
+    PRE_CERTIFICATE = 1
+
+
+class Version(Enum):
+    v1 = 0
+
+
+@six.add_metaclass(abc.ABCMeta)
+class SignedCertificateTimestamp(object):
+    @abc.abstractproperty
+    def version(self):
+        """
+        Returns the SCT version.
+        """
+
+    @abc.abstractproperty
+    def log_id(self):
+        """
+        Returns an identifier indicating which log this SCT is for.
+        """
+
+    @abc.abstractproperty
+    def timestamp(self):
+        """
+        Returns the timestamp for this SCT.
+        """
+
+    @abc.abstractproperty
+    def entry_type(self):
+        """
+        Returns whether this is an SCT for a certificate or pre-certificate.
+        """
diff --git a/server/www/packages/packages-windows/x86/cryptography/x509/extensions.py b/server/www/packages/packages-windows/x86/cryptography/x509/extensions.py
new file mode 100644
index 0000000..eb4b927
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/cryptography/x509/extensions.py
@@ -0,0 +1,1429 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+import abc
+import datetime
+import hashlib
+import ipaddress
+from enum import Enum
+
+from asn1crypto.keys import PublicKeyInfo
+
+import six
+
+from cryptography import utils
+from cryptography.hazmat.primitives import constant_time, serialization
+from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey
+from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey
+from cryptography.x509.certificate_transparency import (
+    SignedCertificateTimestamp
+)
+from cryptography.x509.general_name import GeneralName, IPAddress, OtherName
+from cryptography.x509.name import RelativeDistinguishedName
+from cryptography.x509.oid import (
+    CRLEntryExtensionOID, ExtensionOID, ObjectIdentifier
+)
+
+
+def _key_identifier_from_public_key(public_key):
+    if isinstance(public_key, RSAPublicKey):
+        data = public_key.public_bytes(
+            serialization.Encoding.DER,
+            serialization.PublicFormat.PKCS1,
+        )
+    elif isinstance(public_key, EllipticCurvePublicKey):
+        data = public_key.public_numbers().encode_point()
+    else:
+        # This is a very slow way to do this.
+        serialized = public_key.public_bytes(
+            serialization.Encoding.DER,
+            serialization.PublicFormat.SubjectPublicKeyInfo
+        )
+
+        data = six.binary_type(PublicKeyInfo.load(serialized)['public_key'])
+
+    return hashlib.sha1(data).digest()
+
+
+class DuplicateExtension(Exception):
+    def __init__(self, msg, oid):
+        super(DuplicateExtension, self).__init__(msg)
+        self.oid = oid
+
+
+class ExtensionNotFound(Exception):
+    def __init__(self, msg, oid):
+        super(ExtensionNotFound, self).__init__(msg)
+        self.oid = oid
+
+
+@six.add_metaclass(abc.ABCMeta)
+class ExtensionType(object):
+    @abc.abstractproperty
+    def oid(self):
+        """
+        Returns the oid associated with the given extension type.
+        """
+
+
+class Extensions(object):
+    def __init__(self, extensions):
+        self._extensions = extensions
+
+    def get_extension_for_oid(self, oid):
+        for ext in self:
+            if ext.oid == oid:
+                return ext
+
+        raise ExtensionNotFound("No {0} extension was found".format(oid), oid)
+
+    def get_extension_for_class(self, extclass):
+        if extclass is UnrecognizedExtension:
+            raise TypeError(
+                "UnrecognizedExtension can't be used with "
+                "get_extension_for_class because more than one instance of the"
+                " class may be present."
+            )
+
+        for ext in self:
+            if isinstance(ext.value, extclass):
+                return ext
+
+        raise ExtensionNotFound(
+            "No {0} extension was found".format(extclass), extclass.oid
+        )
+
+    def __iter__(self):
+        return iter(self._extensions)
+
+    def __len__(self):
+        return len(self._extensions)
+
+    def __getitem__(self, idx):
+        return self._extensions[idx]
+
+    def __repr__(self):
+        return (
+            "<Extensions({0})>".format(self._extensions)
+        )
+
+
+@utils.register_interface(ExtensionType)
+class CRLNumber(object):
+    oid = ExtensionOID.CRL_NUMBER
+
+    def __init__(self, crl_number):
+        if not isinstance(crl_number, six.integer_types):
+            raise TypeError("crl_number must be an integer")
+
+        self._crl_number = crl_number
+
+    def __eq__(self, other):
+        if not isinstance(other, CRLNumber):
+            return NotImplemented
+
+        return self.crl_number == other.crl_number
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash(self.crl_number)
+
+    def __repr__(self):
+        return "<CRLNumber({0})>".format(self.crl_number)
+
+    crl_number = utils.read_only_property("_crl_number")
+
+
+@utils.register_interface(ExtensionType)
+class AuthorityKeyIdentifier(object):
+    oid = ExtensionOID.AUTHORITY_KEY_IDENTIFIER
+
+    def __init__(self, key_identifier, authority_cert_issuer,
+                 authority_cert_serial_number):
+        if (authority_cert_issuer is None) != (
+            authority_cert_serial_number is None
+        ):
+            raise ValueError(
+                "authority_cert_issuer and authority_cert_serial_number "
+                "must both be present or both None"
+            )
+
+        if authority_cert_issuer is not None:
+            authority_cert_issuer = list(authority_cert_issuer)
+            if not all(
+                isinstance(x, GeneralName) for x in authority_cert_issuer
+            ):
+                raise TypeError(
+                    "authority_cert_issuer must be a list of GeneralName "
+                    "objects"
+                )
+
+        if authority_cert_serial_number is not None and not isinstance(
+            authority_cert_serial_number, six.integer_types
+        ):
+            raise TypeError(
+                "authority_cert_serial_number must be an integer"
+            )
+
+        self._key_identifier = key_identifier
+        self._authority_cert_issuer = authority_cert_issuer
+        self._authority_cert_serial_number = authority_cert_serial_number
+
+    @classmethod
+    def from_issuer_public_key(cls, public_key):
+        digest = _key_identifier_from_public_key(public_key)
+        return cls(
+            key_identifier=digest,
+            authority_cert_issuer=None,
+            authority_cert_serial_number=None
+        )
+
+    @classmethod
+    def from_issuer_subject_key_identifier(cls, ski):
+        return cls(
+            key_identifier=ski.value.digest,
+            authority_cert_issuer=None,
+            authority_cert_serial_number=None
+        )
+
+    def __repr__(self):
+        return (
+            "<AuthorityKeyIdentifier(key_identifier={0.key_identifier!r}, "
+            "authority_cert_issuer={0.authority_cert_issuer}, "
+            "authority_cert_serial_number={0.authority_cert_serial_number}"
+            ")>".format(self)
+        )
+
+    def __eq__(self, other):
+        if not isinstance(other, AuthorityKeyIdentifier):
+            return NotImplemented
+
+        return (
+            self.key_identifier == other.key_identifier and
+            self.authority_cert_issuer == other.authority_cert_issuer and
+            self.authority_cert_serial_number ==
+            other.authority_cert_serial_number
+        )
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        if self.authority_cert_issuer is None:
+            aci = None
+        else:
+            aci = tuple(self.authority_cert_issuer)
+        return hash((
+            self.key_identifier, aci, self.authority_cert_serial_number
+        ))
+
+    key_identifier = utils.read_only_property("_key_identifier")
+    authority_cert_issuer = utils.read_only_property("_authority_cert_issuer")
+    authority_cert_serial_number = utils.read_only_property(
+        "_authority_cert_serial_number"
+    )
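In practice the two key-identifier extensions are wired together at issuance time: a child certificate's AuthorityKeyIdentifier should match the issuer's SubjectKeyIdentifier. A sketch using the classmethods above (`ca_key` and `ca_cert` are hypothetical objects from an earlier issuance step):

```python
from cryptography import x509

# Derive the issuer's key identifier directly from its public key.
aki = x509.AuthorityKeyIdentifier.from_issuer_public_key(ca_key.public_key())

# Or copy it from the issuer certificate's SubjectKeyIdentifier extension,
# which keeps the chain consistent even if that SKI was not key-derived.
ski_ext = ca_cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
aki = x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ski_ext)
```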
+
+
+@utils.register_interface(ExtensionType)
+class SubjectKeyIdentifier(object):
+    oid = ExtensionOID.SUBJECT_KEY_IDENTIFIER
+
+    def __init__(self, digest):
+        self._digest = digest
+
+    @classmethod
+    def from_public_key(cls, public_key):
+        return cls(_key_identifier_from_public_key(public_key))
+
+    digest = utils.read_only_property("_digest")
+
+    def __repr__(self):
+        return "<SubjectKeyIdentifier(digest={0!r})>".format(self.digest)
+
+    def __eq__(self, other):
+        if not isinstance(other, SubjectKeyIdentifier):
+            return NotImplemented
+
+        return constant_time.bytes_eq(self.digest, other.digest)
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash(self.digest)
+
+
+@utils.register_interface(ExtensionType)
+class AuthorityInformationAccess(object):
+    oid = ExtensionOID.AUTHORITY_INFORMATION_ACCESS
+
+    def __init__(self, descriptions):
+        descriptions = list(descriptions)
+        if not all(isinstance(x, AccessDescription) for x in descriptions):
+            raise TypeError(
+                "Every item in the descriptions list must be an "
+                "AccessDescription"
+            )
+
+        self._descriptions = descriptions
+
+    def __iter__(self):
+        return iter(self._descriptions)
+
+    def __len__(self):
+        return len(self._descriptions)
+
+    def __repr__(self):
+        return "<AuthorityInformationAccess({0})>".format(self._descriptions)
+
+    def __eq__(self, other):
+        if not isinstance(other, AuthorityInformationAccess):
+            return NotImplemented
+
+        return self._descriptions == other._descriptions
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __getitem__(self, idx):
+        return self._descriptions[idx]
+
+    def __hash__(self):
+        return hash(tuple(self._descriptions))
+
+
+class AccessDescription(object):
+    def __init__(self, access_method, access_location):
+        if not isinstance(access_method, ObjectIdentifier):
+            raise TypeError("access_method must be an ObjectIdentifier")
+
+        if not isinstance(access_location, GeneralName):
+            raise TypeError("access_location must be a GeneralName")
+
+        self._access_method = access_method
+        self._access_location = access_location
+
+    def __repr__(self):
+        return (
+            "<AccessDescription(access_method={0.access_method}, "
+            "access_location={0.access_location})>".format(self)
+        )
+
+    def __eq__(self, other):
+        if not isinstance(other, AccessDescription):
+            return NotImplemented
+
+        return (
+            self.access_method == other.access_method and
+            self.access_location == other.access_location
+        )
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash((self.access_method, self.access_location))
+
+    access_method = utils.read_only_property("_access_method")
+    access_location = utils.read_only_property("_access_location")
+
+
+@utils.register_interface(ExtensionType)
+class BasicConstraints(object):
+    oid = ExtensionOID.BASIC_CONSTRAINTS
+
+    def __init__(self, ca, path_length):
+        if not isinstance(ca, bool):
+            raise TypeError("ca must be a boolean value")
+
+        if path_length is not None and not ca:
+            raise ValueError("path_length must be None when ca is False")
+
+        if (
+            path_length is not None and
+            (not isinstance(path_length, six.integer_types) or path_length < 0)
+        ):
+            raise TypeError(
+                "path_length must be a non-negative integer or None"
+            )
+
+        self._ca = ca
+        self._path_length = path_length
+
+    ca = utils.read_only_property("_ca")
+    path_length = utils.read_only_property("_path_length")
+
+    def __repr__(self):
+        return ("<BasicConstraints(ca={0.ca}, "
+                "path_length={0.path_length})>").format(self)
+
+    def __eq__(self, other):
+        if not isinstance(other, BasicConstraints):
+            return NotImplemented
+
+        return self.ca == other.ca and self.path_length == other.path_length
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash((self.ca, self.path_length))
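BasicConstraints is the extension that separates CAs from leaf certificates; note that the validation above rejects a path_length on a non-CA. For example:

```python
from cryptography import x509

# A CA certificate that may sign end-entity certs but no subordinate CAs.
ca = x509.BasicConstraints(ca=True, path_length=0)

# A leaf certificate: path_length must be None when ca is False.
leaf = x509.BasicConstraints(ca=False, path_length=None)
```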
+
+
+@utils.register_interface(ExtensionType)
+class DeltaCRLIndicator(object):
+    oid = ExtensionOID.DELTA_CRL_INDICATOR
+
+    def __init__(self, crl_number):
+        if not isinstance(crl_number, six.integer_types):
+            raise TypeError("crl_number must be an integer")
+
+        self._crl_number = crl_number
+
+    crl_number = utils.read_only_property("_crl_number")
+
+    def __eq__(self, other):
+        if not isinstance(other, DeltaCRLIndicator):
+            return NotImplemented
+
+        return self.crl_number == other.crl_number
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash(self.crl_number)
+
+    def __repr__(self):
+        return "<DeltaCRLIndicator(crl_number={0.crl_number})>".format(self)
+
+
+@utils.register_interface(ExtensionType)
+class CRLDistributionPoints(object):
+    oid = ExtensionOID.CRL_DISTRIBUTION_POINTS
+
+    def __init__(self, distribution_points):
+        distribution_points = list(distribution_points)
+        if not all(
+            isinstance(x, DistributionPoint) for x in distribution_points
+        ):
+            raise TypeError(
+                "distribution_points must be a list of DistributionPoint "
+                "objects"
+            )
+
+        self._distribution_points = distribution_points
+
+    def __iter__(self):
+        return iter(self._distribution_points)
+
+    def __len__(self):
+        return len(self._distribution_points)
+
+    def __repr__(self):
+        return "<CRLDistributionPoints({0})>".format(
+            self._distribution_points
+        )
+
+    def __eq__(self, other):
+        if not isinstance(other, CRLDistributionPoints):
+            return NotImplemented
+
+        return self._distribution_points == other._distribution_points
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __getitem__(self, idx):
+        return self._distribution_points[idx]
+
+    def __hash__(self):
+        return hash(tuple(self._distribution_points))
+
+
+@utils.register_interface(ExtensionType)
+class FreshestCRL(object):
+    oid = ExtensionOID.FRESHEST_CRL
+
+    def __init__(self, distribution_points):
+        distribution_points = list(distribution_points)
+        if not all(
+            isinstance(x, DistributionPoint) for x in distribution_points
+        ):
+            raise TypeError(
+                "distribution_points must be a list of DistributionPoint "
+                "objects"
+            )
+
+        self._distribution_points = distribution_points
+
+    def __iter__(self):
+        return iter(self._distribution_points)
+
+    def __len__(self):
+        return len(self._distribution_points)
+
+    def __repr__(self):
+        return "<FreshestCRL({0})>".format(self._distribution_points)
+
+    def __eq__(self, other):
+        if not isinstance(other, FreshestCRL):
+            return NotImplemented
+
+        return self._distribution_points == other._distribution_points
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __getitem__(self, idx):
+        return self._distribution_points[idx]
+
+    def __hash__(self):
+        return hash(tuple(self._distribution_points))
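Both CRLDistributionPoints and FreshestCRL wrap a list of DistributionPoint values (defined next). A typical single-URI distribution point might look like this (the URL is hypothetical):

```python
from cryptography import x509

cdp = x509.CRLDistributionPoints([
    x509.DistributionPoint(
        full_name=[x509.UniformResourceIdentifier(
            u"http://crl.example.com/ca.crl"
        )],
        relative_name=None,   # mutually exclusive with full_name
        reasons=None,
        crl_issuer=None,
    )
])
```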
+
+
+class DistributionPoint(object):
+    def __init__(self, full_name, relative_name, reasons, crl_issuer):
+        if full_name and relative_name:
+            raise ValueError(
+                "You cannot provide both full_name and relative_name, at "
+                "least one must be None."
+            )
+
+        if full_name:
+            full_name = list(full_name)
+            if not all(isinstance(x, GeneralName) for x in full_name):
+                raise TypeError(
+                    "full_name must be a list of GeneralName objects"
+                )
+
+        if relative_name:
+            if not isinstance(relative_name, RelativeDistinguishedName):
+                raise TypeError(
+                    "relative_name must be a RelativeDistinguishedName"
+                )
+
+        if crl_issuer:
+            crl_issuer = list(crl_issuer)
+            if not all(isinstance(x, GeneralName) for x in crl_issuer):
+                raise TypeError(
+                    "crl_issuer must be None or a list of general names"
+                )
+
+        if reasons and (not isinstance(reasons, frozenset) or not all(
+            isinstance(x, ReasonFlags) for x in reasons
+        )):
+            raise TypeError("reasons must be None or frozenset of ReasonFlags")
+
+        if reasons and (
+            ReasonFlags.unspecified in reasons or
+            ReasonFlags.remove_from_crl in reasons
+        ):
+            raise ValueError(
+                "unspecified and remove_from_crl are not valid reasons in a "
+                "DistributionPoint"
+            )
+
+        if reasons and not crl_issuer and not (full_name or relative_name):
+            raise ValueError(
+                "You must supply crl_issuer, full_name, or relative_name when "
+                "reasons is not None"
+            )
+
+        self._full_name = full_name
+        self._relative_name = relative_name
+        self._reasons = reasons
+        self._crl_issuer = crl_issuer
+
+    def __repr__(self):
+        return (
+            "<DistributionPoint(full_name={0.full_name}, relative_name="
+            "{0.relative_name}, reasons={0.reasons}, "
+            "crl_issuer={0.crl_issuer})>".format(self)
+        )
+
+    def __eq__(self, other):
+        if not isinstance(other, DistributionPoint):
+            return NotImplemented
+
+        return (
+            self.full_name == other.full_name and
+            self.relative_name == other.relative_name and
+            self.reasons == other.reasons and
+            self.crl_issuer == other.crl_issuer
+        )
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        if self.full_name is not None:
+            fn = tuple(self.full_name)
+        else:
+            fn = None
+
+        if self.crl_issuer is not None:
+            crl_issuer = tuple(self.crl_issuer)
+        else:
+            crl_issuer = None
+
+        return hash((fn, self.relative_name, self.reasons, crl_issuer))
+
+    full_name = utils.read_only_property("_full_name")
+    relative_name = utils.read_only_property("_relative_name")
+    reasons = utils.read_only_property("_reasons")
+    crl_issuer = utils.read_only_property("_crl_issuer")
+
+
+class ReasonFlags(Enum):
+    unspecified = "unspecified"
+    key_compromise = "keyCompromise"
+    ca_compromise = "cACompromise"
+    affiliation_changed = "affiliationChanged"
+    superseded = "superseded"
+    cessation_of_operation = "cessationOfOperation"
+    certificate_hold = "certificateHold"
+    privilege_withdrawn = "privilegeWithdrawn"
+    aa_compromise = "aACompromise"
+    remove_from_crl = "removeFromCRL"
+
+
+@utils.register_interface(ExtensionType)
+class PolicyConstraints(object):
+    oid = ExtensionOID.POLICY_CONSTRAINTS
+
+    def __init__(self, require_explicit_policy, inhibit_policy_mapping):
+        if require_explicit_policy is not None and not isinstance(
+            require_explicit_policy, six.integer_types
+        ):
+            raise TypeError(
+                "require_explicit_policy must be a non-negative integer or "
+                "None"
+            )
+
+        if inhibit_policy_mapping is not None and not isinstance(
+            inhibit_policy_mapping, six.integer_types
+        ):
+            raise TypeError(
+                "inhibit_policy_mapping must be a non-negative integer or None"
+            )
+
+        if inhibit_policy_mapping is None and require_explicit_policy is None:
+            raise ValueError(
+                "At least one of require_explicit_policy and "
+                "inhibit_policy_mapping must not be None"
+            )
+
+        self._require_explicit_policy = require_explicit_policy
+        self._inhibit_policy_mapping = inhibit_policy_mapping
+
+    def __repr__(self):
+        return (
+            u"<PolicyConstraints(require_explicit_policy={0.require_explicit"
+            u"_policy}, inhibit_policy_mapping={0.inhibit_policy_"
+            u"mapping})>".format(self)
+        )
+
+    def __eq__(self, other):
+        if not isinstance(other, PolicyConstraints):
+            return NotImplemented
+
+        return (
+            self.require_explicit_policy == other.require_explicit_policy and
+            self.inhibit_policy_mapping == other.inhibit_policy_mapping
+        )
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash(
+            (self.require_explicit_policy, self.inhibit_policy_mapping)
+        )
+
+    require_explicit_policy = utils.read_only_property(
+        "_require_explicit_policy"
+    )
+    inhibit_policy_mapping = utils.read_only_property(
+        "_inhibit_policy_mapping"
+    )
+
+
+@utils.register_interface(ExtensionType)
+class CertificatePolicies(object):
+    oid = ExtensionOID.CERTIFICATE_POLICIES
+
+    def __init__(self, policies):
+        policies = list(policies)
+        if not all(isinstance(x, PolicyInformation) for x in policies):
+            raise TypeError(
+                "Every item in the policies list must be a "
+                "PolicyInformation"
+            )
+
+        self._policies = policies
+
+    def __iter__(self):
+        return iter(self._policies)
+
+    def __len__(self):
+        return len(self._policies)
+
+    def __repr__(self):
+        return "<CertificatePolicies({0})>".format(self._policies)
+
+    def __eq__(self, other):
+        if not isinstance(other, CertificatePolicies):
+            return NotImplemented
+
+        return self._policies == other._policies
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __getitem__(self, idx):
+        return self._policies[idx]
+
+    def __hash__(self):
+        return hash(tuple(self._policies))
+
+
+class PolicyInformation(object):
+    def __init__(self, policy_identifier, policy_qualifiers):
+        if not isinstance(policy_identifier, ObjectIdentifier):
+            raise TypeError("policy_identifier must be an ObjectIdentifier")
+
+        self._policy_identifier = policy_identifier
+
+        if policy_qualifiers:
+            policy_qualifiers = list(policy_qualifiers)
+            if not all(
+                isinstance(x, (six.text_type, UserNotice))
+                for x in policy_qualifiers
+            ):
+                raise TypeError(
+                    "policy_qualifiers must be a list of strings and/or "
+                    "UserNotice objects or None"
+                )
+
+        self._policy_qualifiers = policy_qualifiers
+
+    def __repr__(self):
+        return (
+            "<PolicyInformation(policy_identifier={0.policy_identifier}, "
+            "policy_qualifiers={0.policy_qualifiers})>".format(self)
+        )
+
+    def __eq__(self, other):
+        if not isinstance(other, PolicyInformation):
+            return NotImplemented
+
+        return (
+            self.policy_identifier == other.policy_identifier and
+            self.policy_qualifiers == other.policy_qualifiers
+        )
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        if self.policy_qualifiers is not None:
+            pq = tuple(self.policy_qualifiers)
+        else:
+            pq = None
+
+        return hash((self.policy_identifier, pq))
+
+    policy_identifier = utils.read_only_property("_policy_identifier")
+    policy_qualifiers = utils.read_only_property("_policy_qualifiers")
+
+
+class UserNotice(object):
+    def __init__(self, notice_reference, explicit_text):
+        if notice_reference and not isinstance(
+            notice_reference, NoticeReference
+        ):
+            raise TypeError(
+                "notice_reference must be None or a NoticeReference"
+            )
+
+        self._notice_reference = notice_reference
+        self._explicit_text = explicit_text
+
+    def __repr__(self):
+        return (
+            "<UserNotice(notice_reference={0.notice_reference}, "
+            "explicit_text={0.explicit_text!r})>".format(self)
+        )
+
+    def __eq__(self, other):
+        if not isinstance(other, UserNotice):
+            return NotImplemented
+
+        return (
+            self.notice_reference == other.notice_reference and
+            self.explicit_text == other.explicit_text
+        )
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash((self.notice_reference, self.explicit_text))
+
+    notice_reference = utils.read_only_property("_notice_reference")
+    explicit_text = utils.read_only_property("_explicit_text")
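Policy qualifiers may be plain text strings (treated as CPS URIs when encoded) or UserNotice objects. A sketch with one policy and a CPS URI qualifier (the OID and URL here are illustrative only):

```python
from cryptography import x509

policies = x509.CertificatePolicies([
    x509.PolicyInformation(
        x509.ObjectIdentifier("1.3.6.1.4.1.99999.1.2"),  # hypothetical policy OID
        [u"https://example.com/cps"],                    # CPS URI qualifier
    )
])
```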
+
+
+class NoticeReference(object):
+    def __init__(self, organization, notice_numbers):
+        self._organization = organization
+        notice_numbers = list(notice_numbers)
+        if not all(isinstance(x, int) for x in notice_numbers):
+            raise TypeError(
+                "notice_numbers must be a list of integers"
+            )
+
+        self._notice_numbers = notice_numbers
+
+    def __repr__(self):
+        return (
+            "<NoticeReference(organization={0.organization!r}, "
+            "notice_numbers={0.notice_numbers})>".format(self)
+        )
+
+    def __eq__(self, other):
+        if not isinstance(other, NoticeReference):
+            return NotImplemented
+
+        return (
+            self.organization == other.organization and
+            self.notice_numbers == other.notice_numbers
+        )
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash((self.organization, tuple(self.notice_numbers)))
+
+    organization = utils.read_only_property("_organization")
+    notice_numbers = utils.read_only_property("_notice_numbers")
+
+
+@utils.register_interface(ExtensionType)
+class ExtendedKeyUsage(object):
+    oid = ExtensionOID.EXTENDED_KEY_USAGE
+
+    def __init__(self, usages):
+        usages = list(usages)
+        if not all(isinstance(x, ObjectIdentifier) for x in usages):
+            raise TypeError(
+                "Every item in the usages list must be an ObjectIdentifier"
+            )
+
+        self._usages = usages
+
+    def __iter__(self):
+        return iter(self._usages)
+
+    def __len__(self):
+        return len(self._usages)
+
+    def __repr__(self):
+        return "<ExtendedKeyUsage({0})>".format(self._usages)
+
+    def __eq__(self, other):
+        if not isinstance(other, ExtendedKeyUsage):
+            return NotImplemented
+
+        return self._usages == other._usages
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash(tuple(self._usages))
+
+
+@utils.register_interface(ExtensionType)
+class OCSPNoCheck(object):
+    oid = ExtensionOID.OCSP_NO_CHECK
+
+
+@utils.register_interface(ExtensionType)
+class TLSFeature(object):
+    oid = ExtensionOID.TLS_FEATURE
+
+    def __init__(self, features):
+        features = list(features)
+        if (
+            not all(isinstance(x, TLSFeatureType) for x in features) or
+            len(features) == 0
+        ):
+            raise TypeError(
+                "features must be a list of elements from the TLSFeatureType "
+                "enum"
+            )
+
+        self._features = features
+
+    def __iter__(self):
+        return iter(self._features)
+
+    def __len__(self):
+        return len(self._features)
+
+    def __repr__(self):
+        return "<TLSFeature(features={0._features})>".format(self)
+
+    def __eq__(self, other):
+        if not isinstance(other, TLSFeature):
+            return NotImplemented
+
+        return self._features == other._features
+
+    def __getitem__(self, idx):
+        return self._features[idx]
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash(tuple(self._features))
+
+
+class TLSFeatureType(Enum):
+    # status_request is defined in RFC 6066 and is used for what is commonly
+    # called OCSP Must-Staple when present in the TLS Feature extension in an
+    # X.509 certificate.
+    status_request = 5
+    # status_request_v2 is defined in RFC 6961 and allows multiple OCSP
+    # responses to be provided. It is not currently in use by clients or
+    # servers.
+    status_request_v2 = 17
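The common use of TLSFeature is OCSP Must-Staple, i.e. a single status_request feature marked on the certificate:

```python
from cryptography import x509

# Require a stapled OCSP response during the TLS handshake.
must_staple = x509.TLSFeature([x509.TLSFeatureType.status_request])
```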
+
+
+_TLS_FEATURE_TYPE_TO_ENUM = dict((x.value, x) for x in TLSFeatureType)
+
+
+@utils.register_interface(ExtensionType)
+class InhibitAnyPolicy(object):
+    oid = ExtensionOID.INHIBIT_ANY_POLICY
+
+    def __init__(self, skip_certs):
+        if not isinstance(skip_certs, six.integer_types):
+            raise TypeError("skip_certs must be an integer")
+
+        if skip_certs < 0:
+            raise ValueError("skip_certs must be a non-negative integer")
+
+        self._skip_certs = skip_certs
+
+    def __repr__(self):
+        return "<InhibitAnyPolicy(skip_certs={0.skip_certs})>".format(self)
+
+    def __eq__(self, other):
+        if not isinstance(other, InhibitAnyPolicy):
+            return NotImplemented
+
+        return self.skip_certs == other.skip_certs
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash(self.skip_certs)
+
+    skip_certs = utils.read_only_property("_skip_certs")
+
+
+@utils.register_interface(ExtensionType)
+class KeyUsage(object):
+    oid = ExtensionOID.KEY_USAGE
+
+    def __init__(self, digital_signature, content_commitment,
+                 key_encipherment, data_encipherment, key_agreement,
+                 key_cert_sign, crl_sign, encipher_only, decipher_only):
+        if not key_agreement and (encipher_only or decipher_only):
+            raise ValueError(
+                "encipher_only and decipher_only can only be true when "
+                "key_agreement is true"
+            )
+
+        self._digital_signature = digital_signature
+        self._content_commitment = content_commitment
+        self._key_encipherment = key_encipherment
+        self._data_encipherment = data_encipherment
+        self._key_agreement = key_agreement
+        self._key_cert_sign = key_cert_sign
+        self._crl_sign = crl_sign
+        self._encipher_only = encipher_only
+        self._decipher_only = decipher_only
+
+    digital_signature = utils.read_only_property("_digital_signature")
+    content_commitment = utils.read_only_property("_content_commitment")
+    key_encipherment = utils.read_only_property("_key_encipherment")
+    data_encipherment = utils.read_only_property("_data_encipherment")
+    key_agreement = utils.read_only_property("_key_agreement")
+    key_cert_sign = utils.read_only_property("_key_cert_sign")
+    crl_sign = utils.read_only_property("_crl_sign")
+
+    @property
+    def encipher_only(self):
+        if not self.key_agreement:
+            raise ValueError(
+                "encipher_only is undefined unless key_agreement is true"
+            )
+        else:
+            return self._encipher_only
+
+    @property
+    def decipher_only(self):
+        if not self.key_agreement:
+            raise ValueError(
+                "decipher_only is undefined unless key_agreement is true"
+            )
+        else:
+            return self._decipher_only
+
+    def __repr__(self):
+        try:
+            encipher_only = self.encipher_only
+            decipher_only = self.decipher_only
+        except ValueError:
+            encipher_only = None
+            decipher_only = None
+
+        return ("<KeyUsage(digital_signature={0.digital_signature}, "
+                "content_commitment={0.content_commitment}, "
+                "key_encipherment={0.key_encipherment}, "
+                "data_encipherment={0.data_encipherment}, "
+                "key_agreement={0.key_agreement}, "
+                "key_cert_sign={0.key_cert_sign}, crl_sign={0.crl_sign}, "
+                "encipher_only={1}, decipher_only={2})>").format(
+                    self, encipher_only, decipher_only)
+
+    def __eq__(self, other):
+        if not isinstance(other, KeyUsage):
+            return NotImplemented
+
+        return (
+            self.digital_signature == other.digital_signature and
+            self.content_commitment == other.content_commitment and
+            self.key_encipherment == other.key_encipherment and
+            self.data_encipherment == other.data_encipherment and
+            self.key_agreement == other.key_agreement and
+            self.key_cert_sign == other.key_cert_sign and
+            self.crl_sign == other.crl_sign and
+            self._encipher_only == other._encipher_only and
+            self._decipher_only == other._decipher_only
+        )
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash((
+            self.digital_signature, self.content_commitment,
+            self.key_encipherment, self.data_encipherment,
+            self.key_agreement, self.key_cert_sign,
+            self.crl_sign, self._encipher_only,
+            self._decipher_only
+        ))
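Because encipher_only/decipher_only are only defined when key_agreement is set, all nine flags must be passed explicitly. Typical CA flags, as a sketch:

```python
from cryptography import x509

# A CA certificate usually only signs certificates and CRLs.
ca_usage = x509.KeyUsage(
    digital_signature=False, content_commitment=False,
    key_encipherment=False, data_encipherment=False, key_agreement=False,
    key_cert_sign=True, crl_sign=True, encipher_only=False,
    decipher_only=False,
)
```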
self._encipher_only, + self._decipher_only + )) + + +@utils.register_interface(ExtensionType) +class NameConstraints(object): + oid = ExtensionOID.NAME_CONSTRAINTS + + def __init__(self, permitted_subtrees, excluded_subtrees): + if permitted_subtrees is not None: + permitted_subtrees = list(permitted_subtrees) + if not all( + isinstance(x, GeneralName) for x in permitted_subtrees + ): + raise TypeError( + "permitted_subtrees must be a list of GeneralName objects " + "or None" + ) + + self._validate_ip_name(permitted_subtrees) + + if excluded_subtrees is not None: + excluded_subtrees = list(excluded_subtrees) + if not all( + isinstance(x, GeneralName) for x in excluded_subtrees + ): + raise TypeError( + "excluded_subtrees must be a list of GeneralName objects " + "or None" + ) + + self._validate_ip_name(excluded_subtrees) + + if permitted_subtrees is None and excluded_subtrees is None: + raise ValueError( + "At least one of permitted_subtrees and excluded_subtrees " + "must not be None" + ) + + self._permitted_subtrees = permitted_subtrees + self._excluded_subtrees = excluded_subtrees + + def __eq__(self, other): + if not isinstance(other, NameConstraints): + return NotImplemented + + return ( + self.excluded_subtrees == other.excluded_subtrees and + self.permitted_subtrees == other.permitted_subtrees + ) + + def __ne__(self, other): + return not self == other + + def _validate_ip_name(self, tree): + if any(isinstance(name, IPAddress) and not isinstance( + name.value, (ipaddress.IPv4Network, ipaddress.IPv6Network) + ) for name in tree): + raise TypeError( + "IPAddress name constraints must be an IPv4Network or" + " IPv6Network object" + ) + + def __repr__(self): + return ( + u"<NameConstraints(permitted_subtrees={0.permitted_subtrees}, excluded_subtrees={0.excluded_subtrees})>".format(self) + ) + + def __hash__(self): + if self.permitted_subtrees is not None: + ps = tuple(self.permitted_subtrees) + else: + ps = None + + if self.excluded_subtrees is not None: + es = tuple(self.excluded_subtrees) + else: + es = None + + return hash((ps, es)) + + permitted_subtrees = utils.read_only_property("_permitted_subtrees") + excluded_subtrees = utils.read_only_property("_excluded_subtrees") + + +class Extension(object): + def __init__(self, oid, critical, value): + if not isinstance(oid, ObjectIdentifier): + raise TypeError( + "oid argument must be an ObjectIdentifier instance."
+ ) + + if not isinstance(critical, bool): + raise TypeError("critical must be a boolean value") + + self._oid = oid + self._critical = critical + self._value = value + + oid = utils.read_only_property("_oid") + critical = utils.read_only_property("_critical") + value = utils.read_only_property("_value") + + def __repr__(self): + return ("<Extension(oid={0.oid}, critical={0.critical}, value={0.value})>").format(self) + + def __eq__(self, other): + if not isinstance(other, Extension): + return NotImplemented + + return ( + self.oid == other.oid and + self.critical == other.critical and + self.value == other.value + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.oid, self.critical, self.value)) + + +class GeneralNames(object): + def __init__(self, general_names): + general_names = list(general_names) + if not all(isinstance(x, GeneralName) for x in general_names): + raise TypeError( + "Every item in the general_names list must be an " + "object conforming to the GeneralName interface" + ) + + self._general_names = general_names + + def __iter__(self): + return iter(self._general_names) + + def __len__(self): + return len(self._general_names) + + def get_values_for_type(self, type): + # Return the value of each GeneralName, except for OtherName instances + # which we return directly because it has two important properties not + # just one value. + objs = (i for i in self if isinstance(i, type)) + if type != OtherName: + objs = (i.value for i in objs) + return list(objs) + + def __repr__(self): + return "<GeneralNames({0})>".format(self._general_names) + + def __eq__(self, other): + if not isinstance(other, GeneralNames): + return NotImplemented + + return self._general_names == other._general_names + + def __ne__(self, other): + return not self == other + + def __getitem__(self, idx): + return self._general_names[idx] + + def __hash__(self): + return hash(tuple(self._general_names)) + + +@utils.register_interface(ExtensionType) +class SubjectAlternativeName(object): + oid = ExtensionOID.SUBJECT_ALTERNATIVE_NAME + + def __init__(self, general_names): + self._general_names = GeneralNames(general_names) + + def __iter__(self): + return iter(self._general_names) + + def __len__(self): + return len(self._general_names) + + def get_values_for_type(self, type): + return self._general_names.get_values_for_type(type) + + def __repr__(self): + return "<SubjectAlternativeName({0})>".format(self._general_names) + + def __eq__(self, other): + if not isinstance(other, SubjectAlternativeName): + return NotImplemented + + return self._general_names == other._general_names + + def __getitem__(self, idx): + return self._general_names[idx] + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self._general_names) + + +@utils.register_interface(ExtensionType) +class IssuerAlternativeName(object): + oid = ExtensionOID.ISSUER_ALTERNATIVE_NAME + + def __init__(self, general_names): + self._general_names = GeneralNames(general_names) + + def __iter__(self): + return iter(self._general_names) + + def __len__(self): + return len(self._general_names) + + def get_values_for_type(self, type): + return self._general_names.get_values_for_type(type) + + def __repr__(self): + return "<IssuerAlternativeName({0})>".format(self._general_names) + + def __eq__(self, other): + if not isinstance(other, IssuerAlternativeName): + return NotImplemented + + return self._general_names == other._general_names + + def __ne__(self, other): + return not self == other + + def __getitem__(self, idx): + return self._general_names[idx] + + def __hash__(self): + return
hash(self._general_names) + + +@utils.register_interface(ExtensionType) +class CertificateIssuer(object): + oid = CRLEntryExtensionOID.CERTIFICATE_ISSUER + + def __init__(self, general_names): + self._general_names = GeneralNames(general_names) + + def __iter__(self): + return iter(self._general_names) + + def __len__(self): + return len(self._general_names) + + def get_values_for_type(self, type): + return self._general_names.get_values_for_type(type) + + def __repr__(self): + return "<CertificateIssuer({0})>".format(self._general_names) + + def __eq__(self, other): + if not isinstance(other, CertificateIssuer): + return NotImplemented + + return self._general_names == other._general_names + + def __ne__(self, other): + return not self == other + + def __getitem__(self, idx): + return self._general_names[idx] + + def __hash__(self): + return hash(self._general_names) + + +@utils.register_interface(ExtensionType) +class CRLReason(object): + oid = CRLEntryExtensionOID.CRL_REASON + + def __init__(self, reason): + if not isinstance(reason, ReasonFlags): + raise TypeError("reason must be an element from ReasonFlags") + + self._reason = reason + + def __repr__(self): + return "<CRLReason(reason={0})>".format(self._reason) + + def __eq__(self, other): + if not isinstance(other, CRLReason): + return NotImplemented + + return self.reason == other.reason + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.reason) + + reason = utils.read_only_property("_reason") + + +@utils.register_interface(ExtensionType) +class InvalidityDate(object): + oid = CRLEntryExtensionOID.INVALIDITY_DATE + + def __init__(self, invalidity_date): + if not isinstance(invalidity_date, datetime.datetime): + raise TypeError("invalidity_date must be a datetime.datetime") + + self._invalidity_date = invalidity_date + + def __repr__(self): + return "<InvalidityDate(invalidity_date={0})>".format( + self._invalidity_date + ) + + def __eq__(self, other): + if not isinstance(other, InvalidityDate): + return NotImplemented + + return self.invalidity_date == other.invalidity_date + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.invalidity_date) + + invalidity_date = utils.read_only_property("_invalidity_date") + + +@utils.register_interface(ExtensionType) +class PrecertificateSignedCertificateTimestamps(object): + oid = ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS + + def __init__(self, signed_certificate_timestamps): + signed_certificate_timestamps = list(signed_certificate_timestamps) + if not all( + isinstance(sct, SignedCertificateTimestamp) + for sct in signed_certificate_timestamps + ): + raise TypeError( + "Every item in the signed_certificate_timestamps list must be " + "a SignedCertificateTimestamp" + ) + self._signed_certificate_timestamps = signed_certificate_timestamps + + def __iter__(self): + return iter(self._signed_certificate_timestamps) + + def __len__(self): + return len(self._signed_certificate_timestamps) + + def __getitem__(self, idx): + return self._signed_certificate_timestamps[idx] + + def __repr__(self): + return ( + "<PrecertificateSignedCertificateTimestamps({0})>".format( + list(self) + ) + ) + + +@utils.register_interface(ExtensionType) +class UnrecognizedExtension(object): + def __init__(self, oid, value): + if not isinstance(oid, ObjectIdentifier): + raise TypeError("oid must be an ObjectIdentifier") + self._oid = oid + self._value = value + + oid = utils.read_only_property("_oid") + value = utils.read_only_property("_value") + + def __repr__(self): + return ( + "<UnrecognizedExtension(oid={0.oid}, value={0.value!r})>".format( + self + ) + ) + + def __eq__(self, other): + if not
isinstance(other, UnrecognizedExtension): + return NotImplemented + + return self.oid == other.oid and self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.oid, self.value)) diff --git a/server/www/packages/packages-windows/x86/cryptography/x509/general_name.py b/server/www/packages/packages-windows/x86/cryptography/x509/general_name.py new file mode 100644 index 0000000..26f389a --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/x509/general_name.py @@ -0,0 +1,345 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import abc +import ipaddress +import warnings +from email.utils import parseaddr + +import idna + +import six +from six.moves import urllib_parse + +from cryptography import utils +from cryptography.x509.name import Name +from cryptography.x509.oid import ObjectIdentifier + + +_GENERAL_NAMES = { + 0: "otherName", + 1: "rfc822Name", + 2: "dNSName", + 3: "x400Address", + 4: "directoryName", + 5: "ediPartyName", + 6: "uniformResourceIdentifier", + 7: "iPAddress", + 8: "registeredID", +} + + +class UnsupportedGeneralNameType(Exception): + def __init__(self, msg, type): + super(UnsupportedGeneralNameType, self).__init__(msg) + self.type = type + + +@six.add_metaclass(abc.ABCMeta) +class GeneralName(object): + @abc.abstractproperty + def value(self): + """ + Return the value of the object + """ + + +@utils.register_interface(GeneralName) +class RFC822Name(object): + def __init__(self, value): + if isinstance(value, six.text_type): + try: + value.encode("ascii") + except UnicodeEncodeError: + value = self._idna_encode(value) + warnings.warn( + "RFC822Name values should be passed as an A-label string. " + "This means unicode characters should be encoded via " + "idna. Support for passing unicode strings (aka U-label) " + "will be removed in a future version.", + utils.DeprecatedIn21, + stacklevel=2, + ) + else: + raise TypeError("value must be string") + + name, address = parseaddr(value) + if name or not address: + # parseaddr has found a name (e.g. Name <email>) or the entire + # value is an empty string. + raise ValueError("Invalid rfc822name value") + + self._value = value + + value = utils.read_only_property("_value") + + @classmethod + def _init_without_validation(cls, value): + instance = cls.__new__(cls) + instance._value = value + return instance + + def _idna_encode(self, value): + _, address = parseaddr(value) + parts = address.split(u"@") + return parts[0] + "@" + idna.encode(parts[1]).decode("ascii") + + def __repr__(self): + return "<RFC822Name(value={0!r})>".format(self.value) + + def __eq__(self, other): + if not isinstance(other, RFC822Name): + return NotImplemented + + return self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.value) + + +def _idna_encode(value): + # Retain prefixes '*.' for common/alt names and '.'
for name constraints + for prefix in ['*.', '.']: + if value.startswith(prefix): + value = value[len(prefix):] + return prefix + idna.encode(value).decode("ascii") + return idna.encode(value).decode("ascii") + + +@utils.register_interface(GeneralName) +class DNSName(object): + def __init__(self, value): + if isinstance(value, six.text_type): + try: + value.encode("ascii") + except UnicodeEncodeError: + value = _idna_encode(value) + warnings.warn( + "DNSName values should be passed as an A-label string. " + "This means unicode characters should be encoded via " + "idna. Support for passing unicode strings (aka U-label) " + "will be removed in a future version.", + utils.DeprecatedIn21, + stacklevel=2, + ) + else: + raise TypeError("value must be string") + + self._value = value + + value = utils.read_only_property("_value") + + @classmethod + def _init_without_validation(cls, value): + instance = cls.__new__(cls) + instance._value = value + return instance + + def __repr__(self): + return "<DNSName(value={0!r})>".format(self.value) + + def __eq__(self, other): + if not isinstance(other, DNSName): + return NotImplemented + + return self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.value) + + +@utils.register_interface(GeneralName) +class UniformResourceIdentifier(object): + def __init__(self, value): + if isinstance(value, six.text_type): + try: + value.encode("ascii") + except UnicodeEncodeError: + value = self._idna_encode(value) + warnings.warn( + "URI values should be passed as an A-label string. " + "This means unicode characters should be encoded via " + "idna. Support for passing unicode strings (aka U-label) " + " will be removed in a future version.", + utils.DeprecatedIn21, + stacklevel=2, + ) + else: + raise TypeError("value must be string") + + self._value = value + + value = utils.read_only_property("_value") + + @classmethod + def _init_without_validation(cls, value): + instance = cls.__new__(cls) + instance._value = value + return instance + + def _idna_encode(self, value): + parsed = urllib_parse.urlparse(value) + if parsed.port: + netloc = ( + idna.encode(parsed.hostname) + + ":{0}".format(parsed.port).encode("ascii") + ).decode("ascii") + else: + netloc = idna.encode(parsed.hostname).decode("ascii") + + # Note that building a URL in this fashion means it should be + # semantically indistinguishable from the original but is not + # guaranteed to be exactly the same.
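# Illustrative note (an assumed example, not part of the upstream source): a hypothetical input of u"https://königsgäßchen.de:443/a?b=1" would produce the netloc "xn--knigsgchen-b4a3dun.de:443" here, while the scheme, path, params, query, and fragment below pass through unchanged, so the reassembled URI is ASCII-safe.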
+ return urllib_parse.urlunparse(( + parsed.scheme, + netloc, + parsed.path, + parsed.params, + parsed.query, + parsed.fragment + )) + + def __repr__(self): + return "<UniformResourceIdentifier(value={0!r})>".format(self.value) + + def __eq__(self, other): + if not isinstance(other, UniformResourceIdentifier): + return NotImplemented + + return self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.value) + + +@utils.register_interface(GeneralName) +class DirectoryName(object): + def __init__(self, value): + if not isinstance(value, Name): + raise TypeError("value must be a Name") + + self._value = value + + value = utils.read_only_property("_value") + + def __repr__(self): + return "<DirectoryName(value={0})>".format(self.value) + + def __eq__(self, other): + if not isinstance(other, DirectoryName): + return NotImplemented + + return self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.value) + + +@utils.register_interface(GeneralName) +class RegisteredID(object): + def __init__(self, value): + if not isinstance(value, ObjectIdentifier): + raise TypeError("value must be an ObjectIdentifier") + + self._value = value + + value = utils.read_only_property("_value") + + def __repr__(self): + return "<RegisteredID(value={0})>".format(self.value) + + def __eq__(self, other): + if not isinstance(other, RegisteredID): + return NotImplemented + + return self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.value) + + +@utils.register_interface(GeneralName) +class IPAddress(object): + def __init__(self, value): + if not isinstance( + value, + ( + ipaddress.IPv4Address, + ipaddress.IPv6Address, + ipaddress.IPv4Network, + ipaddress.IPv6Network + ) + ): + raise TypeError( + "value must be an instance of ipaddress.IPv4Address, " + "ipaddress.IPv6Address, ipaddress.IPv4Network, or " + "ipaddress.IPv6Network" + ) + + self._value = value + + value = utils.read_only_property("_value") + + def __repr__(self): + return "<IPAddress(value={0})>".format(self.value) + + def __eq__(self, other): + if not isinstance(other, IPAddress): + return NotImplemented + + return self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.value) + + +@utils.register_interface(GeneralName) +class OtherName(object): + def __init__(self, type_id, value): + if not isinstance(type_id, ObjectIdentifier): + raise TypeError("type_id must be an ObjectIdentifier") + if not isinstance(value, bytes): + raise TypeError("value must be a binary string") + + self._type_id = type_id + self._value = value + + type_id = utils.read_only_property("_type_id") + value = utils.read_only_property("_value") + + def __repr__(self): + return "<OtherName(type_id={0}, value={1!r})>".format( + self.type_id, self.value) + + def __eq__(self, other): + if not isinstance(other, OtherName): + return NotImplemented + + return self.type_id == other.type_id and self.value == other.value + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.type_id, self.value)) diff --git a/server/www/packages/packages-windows/x86/cryptography/x509/name.py b/server/www/packages/packages-windows/x86/cryptography/x509/name.py new file mode 100644 index 0000000..5548eda --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/x509/name.py @@ -0,0 +1,190 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License.
See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +from enum import Enum + +import six + +from cryptography import utils +from cryptography.x509.oid import NameOID, ObjectIdentifier + + +class _ASN1Type(Enum): + UTF8String = 12 + NumericString = 18 + PrintableString = 19 + T61String = 20 + IA5String = 22 + UTCTime = 23 + GeneralizedTime = 24 + VisibleString = 26 + UniversalString = 28 + BMPString = 30 + + +_ASN1_TYPE_TO_ENUM = dict((i.value, i) for i in _ASN1Type) +_SENTINEL = object() +_NAMEOID_DEFAULT_TYPE = { + NameOID.COUNTRY_NAME: _ASN1Type.PrintableString, + NameOID.JURISDICTION_COUNTRY_NAME: _ASN1Type.PrintableString, + NameOID.SERIAL_NUMBER: _ASN1Type.PrintableString, + NameOID.DN_QUALIFIER: _ASN1Type.PrintableString, + NameOID.EMAIL_ADDRESS: _ASN1Type.IA5String, + NameOID.DOMAIN_COMPONENT: _ASN1Type.IA5String, +} + + +class NameAttribute(object): + def __init__(self, oid, value, _type=_SENTINEL): + if not isinstance(oid, ObjectIdentifier): + raise TypeError( + "oid argument must be an ObjectIdentifier instance." + ) + + if not isinstance(value, six.text_type): + raise TypeError( + "value argument must be a text type." + ) + + if ( + oid == NameOID.COUNTRY_NAME or + oid == NameOID.JURISDICTION_COUNTRY_NAME + ): + if len(value.encode("utf8")) != 2: + raise ValueError( + "Country name must be a 2 character country code" + ) + + if len(value) == 0: + raise ValueError("Value cannot be an empty string") + + # The appropriate ASN1 string type varies by OID and is defined across + # multiple RFCs including 2459, 3280, and 5280. In general UTF8String + # is preferred (2459), but 3280 and 5280 specify several OIDs with + # alternate types. This means when we see the sentinel value we need + # to look up whether the OID has a non-UTF8 type. If it does, set it + # to that. Otherwise, UTF8! 
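# Illustrative examples (hypothetical values, not part of the upstream comment): NameAttribute(NameOID.COUNTRY_NAME, u"US") resolves to _ASN1Type.PrintableString through the _NAMEOID_DEFAULT_TYPE table above, while NameAttribute(NameOID.COMMON_NAME, u"example.com") falls back to UTF8String.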
+ if _type == _SENTINEL: + _type = _NAMEOID_DEFAULT_TYPE.get(oid, _ASN1Type.UTF8String) + + if not isinstance(_type, _ASN1Type): + raise TypeError("_type must be from the _ASN1Type enum") + + self._oid = oid + self._value = value + self._type = _type + + oid = utils.read_only_property("_oid") + value = utils.read_only_property("_value") + + def __eq__(self, other): + if not isinstance(other, NameAttribute): + return NotImplemented + + return ( + self.oid == other.oid and + self.value == other.value + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.oid, self.value)) + + def __repr__(self): + return "<NameAttribute(oid={0.oid}, value={0.value!r})>".format(self) + + +class RelativeDistinguishedName(object): + def __init__(self, attributes): + attributes = list(attributes) + if not attributes: + raise ValueError("a relative distinguished name cannot be empty") + if not all(isinstance(x, NameAttribute) for x in attributes): + raise TypeError("attributes must be an iterable of NameAttribute") + + # Keep list and frozenset to preserve attribute order where it matters + self._attributes = attributes + self._attribute_set = frozenset(attributes) + + if len(self._attribute_set) != len(attributes): + raise ValueError("duplicate attributes are not allowed") + + def get_attributes_for_oid(self, oid): + return [i for i in self if i.oid == oid] + + def __eq__(self, other): + if not isinstance(other, RelativeDistinguishedName): + return NotImplemented + + return self._attribute_set == other._attribute_set + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self._attribute_set) + + def __iter__(self): + return iter(self._attributes) + + def __len__(self): + return len(self._attributes) + + def __repr__(self): + return "<RelativeDistinguishedName({0!r})>".format(list(self)) + + +class Name(object): + def __init__(self, attributes): + attributes = list(attributes) + if all(isinstance(x, NameAttribute) for x in attributes): + self._attributes = [ + RelativeDistinguishedName([x]) for x in attributes + ] + elif all(isinstance(x, RelativeDistinguishedName) for x in attributes): + self._attributes = attributes + else: + raise TypeError( + "attributes must be a list of NameAttribute" + " or a list RelativeDistinguishedName" + ) + + def get_attributes_for_oid(self, oid): + return [i for i in self if i.oid == oid] + + @property + def rdns(self): + return self._attributes + + def public_bytes(self, backend): + return backend.x509_name_bytes(self) + + def __eq__(self, other): + if not isinstance(other, Name): + return NotImplemented + + return self._attributes == other._attributes + + def __ne__(self, other): + return not self == other + + def __hash__(self): + # TODO: this is relatively expensive, if this looks like a bottleneck + # for you, consider optimizing! + return hash(tuple(self._attributes)) + + def __iter__(self): + for rdn in self._attributes: + for ava in rdn: + yield ava + + def __len__(self): + return sum(len(rdn) for rdn in self._attributes) + + def __repr__(self): + return "<Name({0!r})>".format(list(self)) diff --git a/server/www/packages/packages-windows/x86/cryptography/x509/oid.py b/server/www/packages/packages-windows/x86/cryptography/x509/oid.py new file mode 100644 index 0000000..90003d7 --- /dev/null +++ b/server/www/packages/packages-windows/x86/cryptography/x509/oid.py @@ -0,0 +1,271 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details.
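# Usage sketch (illustrative, an editor-assumed example rather than upstream code): ObjectIdentifier compares and hashes by its dotted string, so ObjectIdentifier("2.5.4.3") == NameOID.COMMON_NAME evaluates True, and OID instances can serve as dictionary keys, as the _OID_NAMES table at the bottom of this module does.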
+ +from __future__ import absolute_import, division, print_function + +from cryptography import utils +from cryptography.hazmat.primitives import hashes + + +class ObjectIdentifier(object): + def __init__(self, dotted_string): + self._dotted_string = dotted_string + + nodes = self._dotted_string.split(".") + intnodes = [] + + # There must be at least 2 nodes, the first node must be 0..2, and + # if less than 2, the second node cannot have a value outside the + # range 0..39. All nodes must be integers. + for node in nodes: + try: + intnodes.append(int(node, 0)) + except ValueError: + raise ValueError( + "Malformed OID: %s (non-integer nodes)" % ( + self._dotted_string)) + + if len(nodes) < 2: + raise ValueError( + "Malformed OID: %s (insufficient number of nodes)" % ( + self._dotted_string)) + + if intnodes[0] > 2: + raise ValueError( + "Malformed OID: %s (first node outside valid range)" % ( + self._dotted_string)) + + if intnodes[0] < 2 and intnodes[1] >= 40: + raise ValueError( + "Malformed OID: %s (second node outside valid range)" % ( + self._dotted_string)) + + def __eq__(self, other): + if not isinstance(other, ObjectIdentifier): + return NotImplemented + + return self.dotted_string == other.dotted_string + + def __ne__(self, other): + return not self == other + + def __repr__(self): + return "<ObjectIdentifier(oid={0}, name={1})>".format( + self.dotted_string, + self._name + ) + + def __hash__(self): + return hash(self.dotted_string) + + @property + def _name(self): + return _OID_NAMES.get(self, "Unknown OID") + + dotted_string = utils.read_only_property("_dotted_string") + + +class ExtensionOID(object): + SUBJECT_DIRECTORY_ATTRIBUTES = ObjectIdentifier("2.5.29.9") + SUBJECT_KEY_IDENTIFIER = ObjectIdentifier("2.5.29.14") + KEY_USAGE = ObjectIdentifier("2.5.29.15") + SUBJECT_ALTERNATIVE_NAME = ObjectIdentifier("2.5.29.17") + ISSUER_ALTERNATIVE_NAME = ObjectIdentifier("2.5.29.18") + BASIC_CONSTRAINTS = ObjectIdentifier("2.5.29.19") + NAME_CONSTRAINTS = ObjectIdentifier("2.5.29.30") + CRL_DISTRIBUTION_POINTS = ObjectIdentifier("2.5.29.31") + CERTIFICATE_POLICIES = ObjectIdentifier("2.5.29.32") + POLICY_MAPPINGS = ObjectIdentifier("2.5.29.33") + AUTHORITY_KEY_IDENTIFIER = ObjectIdentifier("2.5.29.35") + POLICY_CONSTRAINTS = ObjectIdentifier("2.5.29.36") + EXTENDED_KEY_USAGE = ObjectIdentifier("2.5.29.37") + FRESHEST_CRL = ObjectIdentifier("2.5.29.46") + INHIBIT_ANY_POLICY = ObjectIdentifier("2.5.29.54") + AUTHORITY_INFORMATION_ACCESS = ObjectIdentifier("1.3.6.1.5.5.7.1.1") + SUBJECT_INFORMATION_ACCESS = ObjectIdentifier("1.3.6.1.5.5.7.1.11") + OCSP_NO_CHECK = ObjectIdentifier("1.3.6.1.5.5.7.48.1.5") + TLS_FEATURE = ObjectIdentifier("1.3.6.1.5.5.7.1.24") + CRL_NUMBER = ObjectIdentifier("2.5.29.20") + DELTA_CRL_INDICATOR = ObjectIdentifier("2.5.29.27") + PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS = ( + ObjectIdentifier("1.3.6.1.4.1.11129.2.4.2") + ) + + +class CRLEntryExtensionOID(object): + CERTIFICATE_ISSUER = ObjectIdentifier("2.5.29.29") + CRL_REASON = ObjectIdentifier("2.5.29.21") + INVALIDITY_DATE = ObjectIdentifier("2.5.29.24") + + +class NameOID(object): + COMMON_NAME = ObjectIdentifier("2.5.4.3") + COUNTRY_NAME = ObjectIdentifier("2.5.4.6") + LOCALITY_NAME = ObjectIdentifier("2.5.4.7") + STATE_OR_PROVINCE_NAME = ObjectIdentifier("2.5.4.8") + STREET_ADDRESS = ObjectIdentifier("2.5.4.9") + ORGANIZATION_NAME = ObjectIdentifier("2.5.4.10") + ORGANIZATIONAL_UNIT_NAME = ObjectIdentifier("2.5.4.11") + SERIAL_NUMBER = ObjectIdentifier("2.5.4.5") + SURNAME = ObjectIdentifier("2.5.4.4") + GIVEN_NAME = ObjectIdentifier("2.5.4.42")
+ TITLE = ObjectIdentifier("2.5.4.12") + GENERATION_QUALIFIER = ObjectIdentifier("2.5.4.44") + X500_UNIQUE_IDENTIFIER = ObjectIdentifier("2.5.4.45") + DN_QUALIFIER = ObjectIdentifier("2.5.4.46") + PSEUDONYM = ObjectIdentifier("2.5.4.65") + USER_ID = ObjectIdentifier("0.9.2342.19200300.100.1.1") + DOMAIN_COMPONENT = ObjectIdentifier("0.9.2342.19200300.100.1.25") + EMAIL_ADDRESS = ObjectIdentifier("1.2.840.113549.1.9.1") + JURISDICTION_COUNTRY_NAME = ObjectIdentifier("1.3.6.1.4.1.311.60.2.1.3") + JURISDICTION_LOCALITY_NAME = ObjectIdentifier("1.3.6.1.4.1.311.60.2.1.1") + JURISDICTION_STATE_OR_PROVINCE_NAME = ObjectIdentifier( + "1.3.6.1.4.1.311.60.2.1.2" + ) + BUSINESS_CATEGORY = ObjectIdentifier("2.5.4.15") + POSTAL_ADDRESS = ObjectIdentifier("2.5.4.16") + POSTAL_CODE = ObjectIdentifier("2.5.4.17") + + +class SignatureAlgorithmOID(object): + RSA_WITH_MD5 = ObjectIdentifier("1.2.840.113549.1.1.4") + RSA_WITH_SHA1 = ObjectIdentifier("1.2.840.113549.1.1.5") + # This is an alternate OID for RSA with SHA1 that is occasionally seen + _RSA_WITH_SHA1 = ObjectIdentifier("1.3.14.3.2.29") + RSA_WITH_SHA224 = ObjectIdentifier("1.2.840.113549.1.1.14") + RSA_WITH_SHA256 = ObjectIdentifier("1.2.840.113549.1.1.11") + RSA_WITH_SHA384 = ObjectIdentifier("1.2.840.113549.1.1.12") + RSA_WITH_SHA512 = ObjectIdentifier("1.2.840.113549.1.1.13") + RSASSA_PSS = ObjectIdentifier("1.2.840.113549.1.1.10") + ECDSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10045.4.1") + ECDSA_WITH_SHA224 = ObjectIdentifier("1.2.840.10045.4.3.1") + ECDSA_WITH_SHA256 = ObjectIdentifier("1.2.840.10045.4.3.2") + ECDSA_WITH_SHA384 = ObjectIdentifier("1.2.840.10045.4.3.3") + ECDSA_WITH_SHA512 = ObjectIdentifier("1.2.840.10045.4.3.4") + DSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10040.4.3") + DSA_WITH_SHA224 = ObjectIdentifier("2.16.840.1.101.3.4.3.1") + DSA_WITH_SHA256 = ObjectIdentifier("2.16.840.1.101.3.4.3.2") + + +_SIG_OIDS_TO_HASH = { + SignatureAlgorithmOID.RSA_WITH_MD5: hashes.MD5(), + SignatureAlgorithmOID.RSA_WITH_SHA1: hashes.SHA1(), + SignatureAlgorithmOID._RSA_WITH_SHA1: hashes.SHA1(), + SignatureAlgorithmOID.RSA_WITH_SHA224: hashes.SHA224(), + SignatureAlgorithmOID.RSA_WITH_SHA256: hashes.SHA256(), + SignatureAlgorithmOID.RSA_WITH_SHA384: hashes.SHA384(), + SignatureAlgorithmOID.RSA_WITH_SHA512: hashes.SHA512(), + SignatureAlgorithmOID.ECDSA_WITH_SHA1: hashes.SHA1(), + SignatureAlgorithmOID.ECDSA_WITH_SHA224: hashes.SHA224(), + SignatureAlgorithmOID.ECDSA_WITH_SHA256: hashes.SHA256(), + SignatureAlgorithmOID.ECDSA_WITH_SHA384: hashes.SHA384(), + SignatureAlgorithmOID.ECDSA_WITH_SHA512: hashes.SHA512(), + SignatureAlgorithmOID.DSA_WITH_SHA1: hashes.SHA1(), + SignatureAlgorithmOID.DSA_WITH_SHA224: hashes.SHA224(), + SignatureAlgorithmOID.DSA_WITH_SHA256: hashes.SHA256() +} + + +class ExtendedKeyUsageOID(object): + SERVER_AUTH = ObjectIdentifier("1.3.6.1.5.5.7.3.1") + CLIENT_AUTH = ObjectIdentifier("1.3.6.1.5.5.7.3.2") + CODE_SIGNING = ObjectIdentifier("1.3.6.1.5.5.7.3.3") + EMAIL_PROTECTION = ObjectIdentifier("1.3.6.1.5.5.7.3.4") + TIME_STAMPING = ObjectIdentifier("1.3.6.1.5.5.7.3.8") + OCSP_SIGNING = ObjectIdentifier("1.3.6.1.5.5.7.3.9") + ANY_EXTENDED_KEY_USAGE = ObjectIdentifier("2.5.29.37.0") + + +class AuthorityInformationAccessOID(object): + CA_ISSUERS = ObjectIdentifier("1.3.6.1.5.5.7.48.2") + OCSP = ObjectIdentifier("1.3.6.1.5.5.7.48.1") + + +class CertificatePoliciesOID(object): + CPS_QUALIFIER = ObjectIdentifier("1.3.6.1.5.5.7.2.1") + CPS_USER_NOTICE = ObjectIdentifier("1.3.6.1.5.5.7.2.2") + ANY_POLICY = 
ObjectIdentifier("2.5.29.32.0") + + +_OID_NAMES = { + NameOID.COMMON_NAME: "commonName", + NameOID.COUNTRY_NAME: "countryName", + NameOID.LOCALITY_NAME: "localityName", + NameOID.STATE_OR_PROVINCE_NAME: "stateOrProvinceName", + NameOID.STREET_ADDRESS: "streetAddress", + NameOID.ORGANIZATION_NAME: "organizationName", + NameOID.ORGANIZATIONAL_UNIT_NAME: "organizationalUnitName", + NameOID.SERIAL_NUMBER: "serialNumber", + NameOID.SURNAME: "surname", + NameOID.GIVEN_NAME: "givenName", + NameOID.TITLE: "title", + NameOID.GENERATION_QUALIFIER: "generationQualifier", + NameOID.X500_UNIQUE_IDENTIFIER: "x500UniqueIdentifier", + NameOID.DN_QUALIFIER: "dnQualifier", + NameOID.PSEUDONYM: "pseudonym", + NameOID.USER_ID: "userID", + NameOID.DOMAIN_COMPONENT: "domainComponent", + NameOID.EMAIL_ADDRESS: "emailAddress", + NameOID.JURISDICTION_COUNTRY_NAME: "jurisdictionCountryName", + NameOID.JURISDICTION_LOCALITY_NAME: "jurisdictionLocalityName", + NameOID.JURISDICTION_STATE_OR_PROVINCE_NAME: ( + "jurisdictionStateOrProvinceName" + ), + NameOID.BUSINESS_CATEGORY: "businessCategory", + NameOID.POSTAL_ADDRESS: "postalAddress", + NameOID.POSTAL_CODE: "postalCode", + + SignatureAlgorithmOID.RSA_WITH_MD5: "md5WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA1: "sha1WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA224: "sha224WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA256: "sha256WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA384: "sha384WithRSAEncryption", + SignatureAlgorithmOID.RSA_WITH_SHA512: "sha512WithRSAEncryption", + SignatureAlgorithmOID.RSASSA_PSS: "RSASSA-PSS", + SignatureAlgorithmOID.ECDSA_WITH_SHA1: "ecdsa-with-SHA1", + SignatureAlgorithmOID.ECDSA_WITH_SHA224: "ecdsa-with-SHA224", + SignatureAlgorithmOID.ECDSA_WITH_SHA256: "ecdsa-with-SHA256", + SignatureAlgorithmOID.ECDSA_WITH_SHA384: "ecdsa-with-SHA384", + SignatureAlgorithmOID.ECDSA_WITH_SHA512: "ecdsa-with-SHA512", + SignatureAlgorithmOID.DSA_WITH_SHA1: "dsa-with-sha1", + SignatureAlgorithmOID.DSA_WITH_SHA224: "dsa-with-sha224", + SignatureAlgorithmOID.DSA_WITH_SHA256: "dsa-with-sha256", + ExtendedKeyUsageOID.SERVER_AUTH: "serverAuth", + ExtendedKeyUsageOID.CLIENT_AUTH: "clientAuth", + ExtendedKeyUsageOID.CODE_SIGNING: "codeSigning", + ExtendedKeyUsageOID.EMAIL_PROTECTION: "emailProtection", + ExtendedKeyUsageOID.TIME_STAMPING: "timeStamping", + ExtendedKeyUsageOID.OCSP_SIGNING: "OCSPSigning", + ExtensionOID.SUBJECT_DIRECTORY_ATTRIBUTES: "subjectDirectoryAttributes", + ExtensionOID.SUBJECT_KEY_IDENTIFIER: "subjectKeyIdentifier", + ExtensionOID.KEY_USAGE: "keyUsage", + ExtensionOID.SUBJECT_ALTERNATIVE_NAME: "subjectAltName", + ExtensionOID.ISSUER_ALTERNATIVE_NAME: "issuerAltName", + ExtensionOID.BASIC_CONSTRAINTS: "basicConstraints", + ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS: ( + "signedCertificateTimestampList" + ), + CRLEntryExtensionOID.CRL_REASON: "cRLReason", + CRLEntryExtensionOID.INVALIDITY_DATE: "invalidityDate", + CRLEntryExtensionOID.CERTIFICATE_ISSUER: "certificateIssuer", + ExtensionOID.NAME_CONSTRAINTS: "nameConstraints", + ExtensionOID.CRL_DISTRIBUTION_POINTS: "cRLDistributionPoints", + ExtensionOID.CERTIFICATE_POLICIES: "certificatePolicies", + ExtensionOID.POLICY_MAPPINGS: "policyMappings", + ExtensionOID.AUTHORITY_KEY_IDENTIFIER: "authorityKeyIdentifier", + ExtensionOID.POLICY_CONSTRAINTS: "policyConstraints", + ExtensionOID.EXTENDED_KEY_USAGE: "extendedKeyUsage", + ExtensionOID.FRESHEST_CRL: "freshestCRL", + ExtensionOID.INHIBIT_ANY_POLICY: "inhibitAnyPolicy", + 
ExtensionOID.AUTHORITY_INFORMATION_ACCESS: "authorityInfoAccess", + ExtensionOID.SUBJECT_INFORMATION_ACCESS: "subjectInfoAccess", + ExtensionOID.OCSP_NO_CHECK: "OCSPNoCheck", + ExtensionOID.CRL_NUMBER: "cRLNumber", + ExtensionOID.DELTA_CRL_INDICATOR: "deltaCRLIndicator", + ExtensionOID.TLS_FEATURE: "TLSFeature", + AuthorityInformationAccessOID.OCSP: "OCSP", + AuthorityInformationAccessOID.CA_ISSUERS: "caIssuers", + CertificatePoliciesOID.CPS_QUALIFIER: "id-qt-cps", + CertificatePoliciesOID.CPS_USER_NOTICE: "id-qt-unotice", +} diff --git a/server/www/packages/packages-windows/x86/mako/__init__.py b/server/www/packages/packages-windows/x86/mako/__init__.py new file mode 100644 index 0000000..01c1739 --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/__init__.py @@ -0,0 +1,8 @@ +# mako/__init__.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + + +__version__ = '1.0.7' diff --git a/server/www/packages/packages-windows/x86/mako/_ast_util.py b/server/www/packages/packages-windows/x86/mako/_ast_util.py new file mode 100644 index 0000000..c410287 --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/_ast_util.py @@ -0,0 +1,851 @@ +# mako/_ast_util.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" + ast + ~~~ + + The `ast` module helps Python applications to process trees of the Python + abstract syntax grammar. The abstract syntax itself might change with + each Python release; this module helps to find out programmatically what + the current grammar looks like and allows modifications of it. + + An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as + a flag to the `compile()` builtin function or by using the `parse()` + function from this module. The result will be a tree of objects whose + classes all inherit from `ast.AST`. + + A modified abstract syntax tree can be compiled into a Python code object + using the built-in `compile()` function. + + Additionally various helper functions are provided that make working with + the trees simpler. The main intention of the helper functions and this + module in general is to provide an easy to use interface for libraries + that work tightly with the python syntax (template engines for example). + + + :copyright: Copyright 2008 by Armin Ronacher. + :license: Python License. 
+""" +from _ast import * # noqa +from mako.compat import arg_stringname + +BOOLOP_SYMBOLS = { + And: 'and', + Or: 'or' +} + +BINOP_SYMBOLS = { + Add: '+', + Sub: '-', + Mult: '*', + Div: '/', + FloorDiv: '//', + Mod: '%', + LShift: '<<', + RShift: '>>', + BitOr: '|', + BitAnd: '&', + BitXor: '^' +} + +CMPOP_SYMBOLS = { + Eq: '==', + Gt: '>', + GtE: '>=', + In: 'in', + Is: 'is', + IsNot: 'is not', + Lt: '<', + LtE: '<=', + NotEq: '!=', + NotIn: 'not in' +} + +UNARYOP_SYMBOLS = { + Invert: '~', + Not: 'not', + UAdd: '+', + USub: '-' +} + +ALL_SYMBOLS = {} +ALL_SYMBOLS.update(BOOLOP_SYMBOLS) +ALL_SYMBOLS.update(BINOP_SYMBOLS) +ALL_SYMBOLS.update(CMPOP_SYMBOLS) +ALL_SYMBOLS.update(UNARYOP_SYMBOLS) + + +def parse(expr, filename='', mode='exec'): + """Parse an expression into an AST node.""" + return compile(expr, filename, mode, PyCF_ONLY_AST) + + +def to_source(node, indent_with=' ' * 4): + """ + This function can convert a node tree back into python sourcecode. This + is useful for debugging purposes, especially if you're dealing with custom + asts not generated by python itself. + + It could be that the sourcecode is evaluable when the AST itself is not + compilable / evaluable. The reason for this is that the AST contains some + more data than regular sourcecode does, which is dropped during + conversion. + + Each level of indentation is replaced with `indent_with`. Per default this + parameter is equal to four spaces as suggested by PEP 8, but it might be + adjusted to match the application's styleguide. + """ + generator = SourceGenerator(indent_with) + generator.visit(node) + return ''.join(generator.result) + + +def dump(node): + """ + A very verbose representation of the node passed. This is useful for + debugging purposes. + """ + def _format(node): + if isinstance(node, AST): + return '%s(%s)' % (node.__class__.__name__, + ', '.join('%s=%s' % (a, _format(b)) + for a, b in iter_fields(node))) + elif isinstance(node, list): + return '[%s]' % ', '.join(_format(x) for x in node) + return repr(node) + if not isinstance(node, AST): + raise TypeError('expected AST, got %r' % node.__class__.__name__) + return _format(node) + + +def copy_location(new_node, old_node): + """ + Copy the source location hint (`lineno` and `col_offset`) from the + old to the new node if possible and return the new one. + """ + for attr in 'lineno', 'col_offset': + if attr in old_node._attributes and attr in new_node._attributes \ + and hasattr(old_node, attr): + setattr(new_node, attr, getattr(old_node, attr)) + return new_node + + +def fix_missing_locations(node): + """ + Some nodes require a line number and the column offset. Without that + information the compiler will abort the compilation. Because it can be + a dull task to add appropriate line numbers and column offsets when + adding new nodes this function can help. It copies the line number and + column offset of the parent node to the child nodes without this + information. + + Unlike `copy_location` this works recursive and won't touch nodes that + already have a location information. 
+ """ + def _fix(node, lineno, col_offset): + if 'lineno' in node._attributes: + if not hasattr(node, 'lineno'): + node.lineno = lineno + else: + lineno = node.lineno + if 'col_offset' in node._attributes: + if not hasattr(node, 'col_offset'): + node.col_offset = col_offset + else: + col_offset = node.col_offset + for child in iter_child_nodes(node): + _fix(child, lineno, col_offset) + _fix(node, 1, 0) + return node + + +def increment_lineno(node, n=1): + """ + Increment the line numbers of all nodes by `n` if they have line number + attributes. This is useful to "move code" to a different location in a + file. + """ + for node in zip((node,), walk(node)): + if 'lineno' in node._attributes: + node.lineno = getattr(node, 'lineno', 0) + n + + +def iter_fields(node): + """Iterate over all fields of a node, only yielding existing fields.""" + # CPython 2.5 compat + if not hasattr(node, '_fields') or not node._fields: + return + for field in node._fields: + try: + yield field, getattr(node, field) + except AttributeError: + pass + + +def get_fields(node): + """Like `iter_fields` but returns a dict.""" + return dict(iter_fields(node)) + + +def iter_child_nodes(node): + """Iterate over all child nodes or a node.""" + for name, field in iter_fields(node): + if isinstance(field, AST): + yield field + elif isinstance(field, list): + for item in field: + if isinstance(item, AST): + yield item + + +def get_child_nodes(node): + """Like `iter_child_nodes` but returns a list.""" + return list(iter_child_nodes(node)) + + +def get_compile_mode(node): + """ + Get the mode for `compile` of a given node. If the node is not a `mod` + node (`Expression`, `Module` etc.) a `TypeError` is thrown. + """ + if not isinstance(node, mod): + raise TypeError('expected mod node, got %r' % node.__class__.__name__) + return { + Expression: 'eval', + Interactive: 'single' + }.get(node.__class__, 'expr') + + +def get_docstring(node): + """ + Return the docstring for the given node or `None` if no docstring can be + found. If the node provided does not accept docstrings a `TypeError` + will be raised. + """ + if not isinstance(node, (FunctionDef, ClassDef, Module)): + raise TypeError("%r can't have docstrings" % node.__class__.__name__) + if node.body and isinstance(node.body[0], Str): + return node.body[0].s + + +def walk(node): + """ + Iterate over all nodes. This is useful if you only want to modify nodes in + place and don't care about the context or the order the nodes are returned. + """ + from collections import deque + todo = deque([node]) + while todo: + node = todo.popleft() + todo.extend(iter_child_nodes(node)) + yield node + + +class NodeVisitor(object): + + """ + Walks the abstract syntax tree and call visitor functions for every node + found. The visitor functions may return values which will be forwarded + by the `visit` method. + + Per default the visitor functions for the nodes are ``'visit_'`` + + class name of the node. So a `TryFinally` node visit function would + be `visit_TryFinally`. This behavior can be changed by overriding + the `get_visitor` function. If no visitor function exists for a node + (return value `None`) the `generic_visit` visitor is used instead. + + Don't use the `NodeVisitor` if you want to apply changes to nodes during + traversing. For this a special visitor exists (`NodeTransformer`) that + allows modifications. + """ + + def get_visitor(self, node): + """ + Return the visitor function for this node or `None` if no visitor + exists for this node. 
In that case the generic visit function is + used instead. + """ + method = 'visit_' + node.__class__.__name__ + return getattr(self, method, None) + + def visit(self, node): + """Visit a node.""" + f = self.get_visitor(node) + if f is not None: + return f(node) + return self.generic_visit(node) + + def generic_visit(self, node): + """Called if no explicit visitor function exists for a node.""" + for field, value in iter_fields(node): + if isinstance(value, list): + for item in value: + if isinstance(item, AST): + self.visit(item) + elif isinstance(value, AST): + self.visit(value) + + +class NodeTransformer(NodeVisitor): + + """ + Walks the abstract syntax tree and allows modifications of nodes. + + The `NodeTransformer` will walk the AST and use the return value of the + visitor functions to replace or remove the old node. If the return + value of the visitor function is `None` the node will be removed + from the previous location otherwise it's replaced with the return + value. The return value may be the original node in which case no + replacement takes place. + + Here an example transformer that rewrites all `foo` to `data['foo']`:: + + class RewriteName(NodeTransformer): + + def visit_Name(self, node): + return copy_location(Subscript( + value=Name(id='data', ctx=Load()), + slice=Index(value=Str(s=node.id)), + ctx=node.ctx + ), node) + + Keep in mind that if the node you're operating on has child nodes + you must either transform the child nodes yourself or call the generic + visit function for the node first. + + Nodes that were part of a collection of statements (that applies to + all statement nodes) may also return a list of nodes rather than just + a single node. + + Usually you use the transformer like this:: + + node = YourTransformer().visit(node) + """ + + def generic_visit(self, node): + for field, old_value in iter_fields(node): + old_value = getattr(node, field, None) + if isinstance(old_value, list): + new_values = [] + for value in old_value: + if isinstance(value, AST): + value = self.visit(value) + if value is None: + continue + elif not isinstance(value, AST): + new_values.extend(value) + continue + new_values.append(value) + old_value[:] = new_values + elif isinstance(old_value, AST): + new_node = self.visit(old_value) + if new_node is None: + delattr(node, field) + else: + setattr(node, field, new_node) + return node + + +class SourceGenerator(NodeVisitor): + + """ + This visitor is able to transform a well formed syntax tree into python + sourcecode. For more details have a look at the docstring of the + `node_to_source` function. 
+ """ + + def __init__(self, indent_with): + self.result = [] + self.indent_with = indent_with + self.indentation = 0 + self.new_lines = 0 + + def write(self, x): + if self.new_lines: + if self.result: + self.result.append('\n' * self.new_lines) + self.result.append(self.indent_with * self.indentation) + self.new_lines = 0 + self.result.append(x) + + def newline(self, n=1): + self.new_lines = max(self.new_lines, n) + + def body(self, statements): + self.new_line = True + self.indentation += 1 + for stmt in statements: + self.visit(stmt) + self.indentation -= 1 + + def body_or_else(self, node): + self.body(node.body) + if node.orelse: + self.newline() + self.write('else:') + self.body(node.orelse) + + def signature(self, node): + want_comma = [] + + def write_comma(): + if want_comma: + self.write(', ') + else: + want_comma.append(True) + + padding = [None] * (len(node.args) - len(node.defaults)) + for arg, default in zip(node.args, padding + node.defaults): + write_comma() + self.visit(arg) + if default is not None: + self.write('=') + self.visit(default) + if node.vararg is not None: + write_comma() + self.write('*' + arg_stringname(node.vararg)) + if node.kwarg is not None: + write_comma() + self.write('**' + arg_stringname(node.kwarg)) + + def decorators(self, node): + for decorator in node.decorator_list: + self.newline() + self.write('@') + self.visit(decorator) + + # Statements + + def visit_Assign(self, node): + self.newline() + for idx, target in enumerate(node.targets): + if idx: + self.write(', ') + self.visit(target) + self.write(' = ') + self.visit(node.value) + + def visit_AugAssign(self, node): + self.newline() + self.visit(node.target) + self.write(BINOP_SYMBOLS[type(node.op)] + '=') + self.visit(node.value) + + def visit_ImportFrom(self, node): + self.newline() + self.write('from %s%s import ' % ('.' * node.level, node.module)) + for idx, item in enumerate(node.names): + if idx: + self.write(', ') + self.write(item) + + def visit_Import(self, node): + self.newline() + for item in node.names: + self.write('import ') + self.visit(item) + + def visit_Expr(self, node): + self.newline() + self.generic_visit(node) + + def visit_FunctionDef(self, node): + self.newline(n=2) + self.decorators(node) + self.newline() + self.write('def %s(' % node.name) + self.signature(node.args) + self.write('):') + self.body(node.body) + + def visit_ClassDef(self, node): + have_args = [] + + def paren_or_comma(): + if have_args: + self.write(', ') + else: + have_args.append(True) + self.write('(') + + self.newline(n=3) + self.decorators(node) + self.newline() + self.write('class %s' % node.name) + for base in node.bases: + paren_or_comma() + self.visit(base) + # XXX: the if here is used to keep this module compatible + # with python 2.6. 
+ if hasattr(node, 'keywords'): + for keyword in node.keywords: + paren_or_comma() + self.write(keyword.arg + '=') + self.visit(keyword.value) + if getattr(node, "starargs", None): + paren_or_comma() + self.write('*') + self.visit(node.starargs) + if getattr(node, "kwargs", None): + paren_or_comma() + self.write('**') + self.visit(node.kwargs) + self.write(have_args and '):' or ':') + self.body(node.body) + + def visit_If(self, node): + self.newline() + self.write('if ') + self.visit(node.test) + self.write(':') + self.body(node.body) + while True: + else_ = node.orelse + if len(else_) == 1 and isinstance(else_[0], If): + node = else_[0] + self.newline() + self.write('elif ') + self.visit(node.test) + self.write(':') + self.body(node.body) + else: + self.newline() + self.write('else:') + self.body(else_) + break + + def visit_For(self, node): + self.newline() + self.write('for ') + self.visit(node.target) + self.write(' in ') + self.visit(node.iter) + self.write(':') + self.body_or_else(node) + + def visit_While(self, node): + self.newline() + self.write('while ') + self.visit(node.test) + self.write(':') + self.body_or_else(node) + + def visit_With(self, node): + self.newline() + self.write('with ') + self.visit(node.context_expr) + if node.optional_vars is not None: + self.write(' as ') + self.visit(node.optional_vars) + self.write(':') + self.body(node.body) + + def visit_Pass(self, node): + self.newline() + self.write('pass') + + def visit_Print(self, node): + # XXX: python 2.6 only + self.newline() + self.write('print ') + want_comma = False + if node.dest is not None: + self.write(' >> ') + self.visit(node.dest) + want_comma = True + for value in node.values: + if want_comma: + self.write(', ') + self.visit(value) + want_comma = True + if not node.nl: + self.write(',') + + def visit_Delete(self, node): + self.newline() + self.write('del ') + for idx, target in enumerate(node): + if idx: + self.write(', ') + self.visit(target) + + def visit_TryExcept(self, node): + self.newline() + self.write('try:') + self.body(node.body) + for handler in node.handlers: + self.visit(handler) + + def visit_TryFinally(self, node): + self.newline() + self.write('try:') + self.body(node.body) + self.newline() + self.write('finally:') + self.body(node.finalbody) + + def visit_Global(self, node): + self.newline() + self.write('global ' + ', '.join(node.names)) + + def visit_Nonlocal(self, node): + self.newline() + self.write('nonlocal ' + ', '.join(node.names)) + + def visit_Return(self, node): + self.newline() + self.write('return ') + self.visit(node.value) + + def visit_Break(self, node): + self.newline() + self.write('break') + + def visit_Continue(self, node): + self.newline() + self.write('continue') + + def visit_Raise(self, node): + # XXX: Python 2.6 / 3.0 compatibility + self.newline() + self.write('raise') + if hasattr(node, 'exc') and node.exc is not None: + self.write(' ') + self.visit(node.exc) + if node.cause is not None: + self.write(' from ') + self.visit(node.cause) + elif hasattr(node, 'type') and node.type is not None: + self.visit(node.type) + if node.inst is not None: + self.write(', ') + self.visit(node.inst) + if node.tback is not None: + self.write(', ') + self.visit(node.tback) + + # Expressions + + def visit_Attribute(self, node): + self.visit(node.value) + self.write('.' 
+ node.attr) + + def visit_Call(self, node): + want_comma = [] + + def write_comma(): + if want_comma: + self.write(', ') + else: + want_comma.append(True) + + self.visit(node.func) + self.write('(') + for arg in node.args: + write_comma() + self.visit(arg) + for keyword in node.keywords: + write_comma() + self.write(keyword.arg + '=') + self.visit(keyword.value) + if getattr(node, "starargs", None): + write_comma() + self.write('*') + self.visit(node.starargs) + if getattr(node, "kwargs", None): + write_comma() + self.write('**') + self.visit(node.kwargs) + self.write(')') + + def visit_Name(self, node): + self.write(node.id) + + def visit_NameConstant(self, node): + self.write(str(node.value)) + + def visit_arg(self, node): + self.write(node.arg) + + def visit_Str(self, node): + self.write(repr(node.s)) + + def visit_Bytes(self, node): + self.write(repr(node.s)) + + def visit_Num(self, node): + self.write(repr(node.n)) + + def visit_Tuple(self, node): + self.write('(') + idx = -1 + for idx, item in enumerate(node.elts): + if idx: + self.write(', ') + self.visit(item) + self.write(idx and ')' or ',)') + + def sequence_visit(left, right): + def visit(self, node): + self.write(left) + for idx, item in enumerate(node.elts): + if idx: + self.write(', ') + self.visit(item) + self.write(right) + return visit + + visit_List = sequence_visit('[', ']') + visit_Set = sequence_visit('{', '}') + del sequence_visit + + def visit_Dict(self, node): + self.write('{') + for idx, (key, value) in enumerate(zip(node.keys, node.values)): + if idx: + self.write(', ') + self.visit(key) + self.write(': ') + self.visit(value) + self.write('}') + + def visit_BinOp(self, node): + self.write('(') + self.visit(node.left) + self.write(' %s ' % BINOP_SYMBOLS[type(node.op)]) + self.visit(node.right) + self.write(')') + + def visit_BoolOp(self, node): + self.write('(') + for idx, value in enumerate(node.values): + if idx: + self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)]) + self.visit(value) + self.write(')') + + def visit_Compare(self, node): + self.write('(') + self.visit(node.left) + for op, right in zip(node.ops, node.comparators): + self.write(' %s ' % CMPOP_SYMBOLS[type(op)]) + self.visit(right) + self.write(')') + + def visit_UnaryOp(self, node): + self.write('(') + op = UNARYOP_SYMBOLS[type(node.op)] + self.write(op) + if op == 'not': + self.write(' ') + self.visit(node.operand) + self.write(')') + + def visit_Subscript(self, node): + self.visit(node.value) + self.write('[') + self.visit(node.slice) + self.write(']') + + def visit_Slice(self, node): + if node.lower is not None: + self.visit(node.lower) + self.write(':') + if node.upper is not None: + self.visit(node.upper) + if node.step is not None: + self.write(':') + if not (isinstance(node.step, Name) and node.step.id == 'None'): + self.visit(node.step) + + def visit_ExtSlice(self, node): + for idx, item in node.dims: + if idx: + self.write(', ') + self.visit(item) + + def visit_Yield(self, node): + self.write('yield ') + self.visit(node.value) + + def visit_Lambda(self, node): + self.write('lambda ') + self.signature(node.args) + self.write(': ') + self.visit(node.body) + + def visit_Ellipsis(self, node): + self.write('Ellipsis') + + def generator_visit(left, right): + def visit(self, node): + self.write(left) + self.visit(node.elt) + for comprehension in node.generators: + self.visit(comprehension) + self.write(right) + return visit + + visit_ListComp = generator_visit('[', ']') + visit_GeneratorExp = generator_visit('(', ')') + visit_SetComp = 
generator_visit('{', '}') + del generator_visit + + def visit_DictComp(self, node): + self.write('{') + self.visit(node.key) + self.write(': ') + self.visit(node.value) + for comprehension in node.generators: + self.visit(comprehension) + self.write('}') + + def visit_IfExp(self, node): + self.visit(node.body) + self.write(' if ') + self.visit(node.test) + self.write(' else ') + self.visit(node.orelse) + + def visit_Starred(self, node): + self.write('*') + self.visit(node.value) + + def visit_Repr(self, node): + # XXX: python 2.6 only + self.write('`') + self.visit(node.value) + self.write('`') + + # Helper Nodes + + def visit_alias(self, node): + self.write(node.name) + if node.asname is not None: + self.write(' as ' + node.asname) + + def visit_comprehension(self, node): + self.write(' for ') + self.visit(node.target) + self.write(' in ') + self.visit(node.iter) + if node.ifs: + for if_ in node.ifs: + self.write(' if ') + self.visit(if_) + + def visit_excepthandler(self, node): + self.newline() + self.write('except') + if node.type is not None: + self.write(' ') + self.visit(node.type) + if node.name is not None: + self.write(' as ') + self.visit(node.name) + self.write(':') + self.body(node.body) diff --git a/server/www/packages/packages-windows/x86/mako/ast.py b/server/www/packages/packages-windows/x86/mako/ast.py new file mode 100644 index 0000000..8d2d150 --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/ast.py @@ -0,0 +1,191 @@ +# mako/ast.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""utilities for analyzing expressions and blocks of Python +code, as well as generating Python from AST nodes""" + +from mako import exceptions, pyparser, compat +import re + + +class PythonCode(object): + + """represents information about a string containing Python code""" + + def __init__(self, code, **exception_kwargs): + self.code = code + + # represents all identifiers which are assigned to at some point in + # the code + self.declared_identifiers = set() + + # represents all identifiers which are referenced before their + # assignment, if any + self.undeclared_identifiers = set() + + # note that an identifier can be in both the undeclared and declared + # lists. 
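# Illustrative example (an editor-assumed note, not part of the upstream comment): for a block like "x = x + 1" with no prior binding, "x" is referenced on the right-hand side before the assignment completes, so it appears in undeclared_identifiers as well as in declared_identifiers.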
+ + # using AST to parse instead of using code.co_varnames, + # code.co_names has several advantages: + # - we can locate an identifier as "undeclared" even if + # its declared later in the same block of code + # - AST is less likely to break with version changes + # (for example, the behavior of co_names changed a little bit + # in python version 2.5) + if isinstance(code, compat.string_types): + expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs) + else: + expr = code + + f = pyparser.FindIdentifiers(self, **exception_kwargs) + f.visit(expr) + + +class ArgumentList(object): + + """parses a fragment of code as a comma-separated list of expressions""" + + def __init__(self, code, **exception_kwargs): + self.codeargs = [] + self.args = [] + self.declared_identifiers = set() + self.undeclared_identifiers = set() + if isinstance(code, compat.string_types): + if re.match(r"\S", code) and not re.match(r",\s*$", code): + # if theres text and no trailing comma, insure its parsed + # as a tuple by adding a trailing comma + code += "," + expr = pyparser.parse(code, "exec", **exception_kwargs) + else: + expr = code + + f = pyparser.FindTuple(self, PythonCode, **exception_kwargs) + f.visit(expr) + + +class PythonFragment(PythonCode): + + """extends PythonCode to provide identifier lookups in partial control + statements + + e.g. + for x in 5: + elif y==9: + except (MyException, e): + etc. + """ + + def __init__(self, code, **exception_kwargs): + m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S) + if not m: + raise exceptions.CompileException( + "Fragment '%s' is not a partial control statement" % + code, **exception_kwargs) + if m.group(3): + code = code[:m.start(3)] + (keyword, expr) = m.group(1, 2) + if keyword in ['for', 'if', 'while']: + code = code + "pass" + elif keyword == 'try': + code = code + "pass\nexcept:pass" + elif keyword == 'elif' or keyword == 'else': + code = "if False:pass\n" + code + "pass" + elif keyword == 'except': + code = "try:pass\n" + code + "pass" + elif keyword == 'with': + code = code + "pass" + else: + raise exceptions.CompileException( + "Unsupported control keyword: '%s'" % + keyword, **exception_kwargs) + super(PythonFragment, self).__init__(code, **exception_kwargs) + + +class FunctionDecl(object): + + """function declaration""" + + def __init__(self, code, allow_kwargs=True, **exception_kwargs): + self.code = code + expr = pyparser.parse(code, "exec", **exception_kwargs) + + f = pyparser.ParseFunc(self, **exception_kwargs) + f.visit(expr) + if not hasattr(self, 'funcname'): + raise exceptions.CompileException( + "Code '%s' is not a function declaration" % code, + **exception_kwargs) + if not allow_kwargs and self.kwargs: + raise exceptions.CompileException( + "'**%s' keyword argument not allowed here" % + self.kwargnames[-1], **exception_kwargs) + + def get_argument_expressions(self, as_call=False): + """Return the argument declarations of this FunctionDecl as a printable + list. + + By default the return value is appropriate for writing in a ``def``; + set `as_call` to true to build arguments to be passed to the function + instead (assuming locals with the same names as the arguments exist). 
+ """ + + namedecls = [] + + # Build in reverse order, since defaults and slurpy args come last + argnames = self.argnames[::-1] + kwargnames = self.kwargnames[::-1] + defaults = self.defaults[::-1] + kwdefaults = self.kwdefaults[::-1] + + # Named arguments + if self.kwargs: + namedecls.append("**" + kwargnames.pop(0)) + + for name in kwargnames: + # Keyword-only arguments must always be used by name, so even if + # this is a call, print out `foo=foo` + if as_call: + namedecls.append("%s=%s" % (name, name)) + elif kwdefaults: + default = kwdefaults.pop(0) + if default is None: + # The AST always gives kwargs a default, since you can do + # `def foo(*, a=1, b, c=3)` + namedecls.append(name) + else: + namedecls.append("%s=%s" % ( + name, pyparser.ExpressionGenerator(default).value())) + else: + namedecls.append(name) + + # Positional arguments + if self.varargs: + namedecls.append("*" + argnames.pop(0)) + + for name in argnames: + if as_call or not defaults: + namedecls.append(name) + else: + default = defaults.pop(0) + namedecls.append("%s=%s" % ( + name, pyparser.ExpressionGenerator(default).value())) + + namedecls.reverse() + return namedecls + + @property + def allargnames(self): + return tuple(self.argnames) + tuple(self.kwargnames) + + +class FunctionArgs(FunctionDecl): + + """the argument portion of a function declaration""" + + def __init__(self, code, **kwargs): + super(FunctionArgs, self).__init__("def ANON(%s):pass" % code, + **kwargs) diff --git a/server/www/packages/packages-windows/x86/mako/cache.py b/server/www/packages/packages-windows/x86/mako/cache.py new file mode 100644 index 0000000..1af17dd --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/cache.py @@ -0,0 +1,240 @@ +# mako/cache.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from mako import compat, util + +_cache_plugins = util.PluginLoader("mako.cache") + +register_plugin = _cache_plugins.register +register_plugin("beaker", "mako.ext.beaker_cache", "BeakerCacheImpl") + + +class Cache(object): + + """Represents a data content cache made available to the module + space of a specific :class:`.Template` object. + + .. versionadded:: 0.6 + :class:`.Cache` by itself is mostly a + container for a :class:`.CacheImpl` object, which implements + a fixed API to provide caching services; specific subclasses exist to + implement different + caching strategies. Mako includes a backend that works with + the Beaker caching system. Beaker itself then supports + a number of backends (i.e. file, memory, memcached, etc.) + + The construction of a :class:`.Cache` is part of the mechanics + of a :class:`.Template`, and programmatic access to this + cache is typically via the :attr:`.Template.cache` attribute. + + """ + + impl = None + """Provide the :class:`.CacheImpl` in use by this :class:`.Cache`. + + This accessor allows a :class:`.CacheImpl` with additional + methods beyond that of :class:`.Cache` to be used programmatically. + + """ + + id = None + """Return the 'id' that identifies this cache. + + This is a value that should be globally unique to the + :class:`.Template` associated with this cache, and can + be used by a caching system to name a local container + for data specific to this template. + + """ + + starttime = None + """Epochal time value for when the owning :class:`.Template` was + first compiled. 
+ + A cache implementation may wish to invalidate data earlier than + this timestamp; this has the effect of the cache for a specific + :class:`.Template` starting clean any time the :class:`.Template` + is recompiled, such as when the original template file changed on + the filesystem. + + """ + + def __init__(self, template, *args): + # check for a stale template calling the + # constructor + if isinstance(template, compat.string_types) and args: + return + self.template = template + self.id = template.module.__name__ + self.starttime = template.module._modified_time + self._def_regions = {} + self.impl = self._load_impl(self.template.cache_impl) + + def _load_impl(self, name): + return _cache_plugins.load(name)(self) + + def get_or_create(self, key, creation_function, **kw): + """Retrieve a value from the cache, using the given creation function + to generate a new value.""" + + return self._ctx_get_or_create(key, creation_function, None, **kw) + + def _ctx_get_or_create(self, key, creation_function, context, **kw): + """Retrieve a value from the cache, using the given creation function + to generate a new value.""" + + if not self.template.cache_enabled: + return creation_function() + + return self.impl.get_or_create( + key, + creation_function, + **self._get_cache_kw(kw, context)) + + def set(self, key, value, **kw): + r"""Place a value in the cache. + + :param key: the value's key. + :param value: the value. + :param \**kw: cache configuration arguments. + + """ + + self.impl.set(key, value, **self._get_cache_kw(kw, None)) + + put = set + """A synonym for :meth:`.Cache.set`. + + This is here for backwards compatibility. + + """ + + def get(self, key, **kw): + r"""Retrieve a value from the cache. + + :param key: the value's key. + :param \**kw: cache configuration arguments. The + backend is configured using these arguments upon first request. + Subsequent requests that use the same series of configuration + values will use that same backend. + + """ + return self.impl.get(key, **self._get_cache_kw(kw, None)) + + def invalidate(self, key, **kw): + r"""Invalidate a value in the cache. + + :param key: the value's key. + :param \**kw: cache configuration arguments. The + backend is configured using these arguments upon first request. + Subsequent requests that use the same series of configuration + values will use that same backend. + + """ + self.impl.invalidate(key, **self._get_cache_kw(kw, None)) + + def invalidate_body(self): + """Invalidate the cached content of the "body" method for this + template. + + """ + self.invalidate('render_body', __M_defname='render_body') + + def invalidate_def(self, name): + """Invalidate the cached content of a particular ``<%def>`` within this + template. + + """ + + self.invalidate('render_%s' % name, __M_defname='render_%s' % name) + + def invalidate_closure(self, name): + """Invalidate a nested ``<%def>`` within this template. + + Caching of nested defs is a blunt tool as there is no + management of scope -- nested defs that use cache tags + need to have names unique of all other nested defs in the + template, else their content will be overwritten by + each other. 
+ + """ + + self.invalidate(name, __M_defname=name) + + def _get_cache_kw(self, kw, context): + defname = kw.pop('__M_defname', None) + if not defname: + tmpl_kw = self.template.cache_args.copy() + tmpl_kw.update(kw) + elif defname in self._def_regions: + tmpl_kw = self._def_regions[defname] + else: + tmpl_kw = self.template.cache_args.copy() + tmpl_kw.update(kw) + self._def_regions[defname] = tmpl_kw + if context and self.impl.pass_context: + tmpl_kw = tmpl_kw.copy() + tmpl_kw.setdefault('context', context) + return tmpl_kw + + +class CacheImpl(object): + + """Provide a cache implementation for use by :class:`.Cache`.""" + + def __init__(self, cache): + self.cache = cache + + pass_context = False + """If ``True``, the :class:`.Context` will be passed to + :meth:`get_or_create <.CacheImpl.get_or_create>` as the name ``'context'``. + """ + + def get_or_create(self, key, creation_function, **kw): + r"""Retrieve a value from the cache, using the given creation function + to generate a new value. + + This function *must* return a value, either from + the cache, or via the given creation function. + If the creation function is called, the newly + created value should be populated into the cache + under the given key before being returned. + + :param key: the value's key. + :param creation_function: function that when called generates + a new value. + :param \**kw: cache configuration arguments. + + """ + raise NotImplementedError() + + def set(self, key, value, **kw): + r"""Place a value in the cache. + + :param key: the value's key. + :param value: the value. + :param \**kw: cache configuration arguments. + + """ + raise NotImplementedError() + + def get(self, key, **kw): + r"""Retrieve a value from the cache. + + :param key: the value's key. + :param \**kw: cache configuration arguments. + + """ + raise NotImplementedError() + + def invalidate(self, key, **kw): + r"""Invalidate a value in the cache. + + :param key: the value's key. + :param \**kw: cache configuration arguments. + + """ + raise NotImplementedError() diff --git a/server/www/packages/packages-windows/x86/mako/cmd.py b/server/www/packages/packages-windows/x86/mako/cmd.py new file mode 100644 index 0000000..8db1346 --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/cmd.py @@ -0,0 +1,67 @@ +# mako/cmd.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +from argparse import ArgumentParser +from os.path import isfile, dirname +import sys +from mako.template import Template +from mako.lookup import TemplateLookup +from mako import exceptions + + +def varsplit(var): + if "=" not in var: + return (var, "") + return var.split("=", 1) + + +def _exit(): + sys.stderr.write(exceptions.text_error_template().render()) + sys.exit(1) + + +def cmdline(argv=None): + + parser = ArgumentParser("usage: %prog [FILENAME]") + parser.add_argument( + "--var", default=[], action="append", + help="variable (can be used multiple times, use name=value)") + parser.add_argument( + "--template-dir", default=[], action="append", + help="Directory to use for template lookup (multiple " + "directories may be provided). 
If not given then if the " + "template is read from stdin, the value defaults to be " + "the current directory, otherwise it defaults to be the " + "parent directory of the file provided.") + parser.add_argument('input', nargs='?', default='-') + + options = parser.parse_args(argv) + if options.input == '-': + lookup_dirs = options.template_dir or ["."] + lookup = TemplateLookup(lookup_dirs) + try: + template = Template(sys.stdin.read(), lookup=lookup) + except: + _exit() + else: + filename = options.input + if not isfile(filename): + raise SystemExit("error: can't find %s" % filename) + lookup_dirs = options.template_dir or [dirname(filename)] + lookup = TemplateLookup(lookup_dirs) + try: + template = Template(filename=filename, lookup=lookup) + except: + _exit() + + kw = dict([varsplit(var) for var in options.var]) + try: + sys.stdout.write(template.render(**kw)) + except: + _exit() + + +if __name__ == "__main__": + cmdline() diff --git a/server/www/packages/packages-windows/x86/mako/codegen.py b/server/www/packages/packages-windows/x86/mako/codegen.py new file mode 100644 index 0000000..d4ecbe8 --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/codegen.py @@ -0,0 +1,1255 @@ +# mako/codegen.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""provides functionality for rendering a parsetree constructing into module +source code.""" + +import time +import re +from mako.pygen import PythonPrinter +from mako import util, ast, parsetree, filters, exceptions +from mako import compat + + +MAGIC_NUMBER = 10 + +# names which are hardwired into the +# template and are not accessed via the +# context itself +TOPLEVEL_DECLARED = set(["UNDEFINED", "STOP_RENDERING"]) +RESERVED_NAMES = set(['context', 'loop']).union(TOPLEVEL_DECLARED) + + +def compile(node, + uri, + filename=None, + default_filters=None, + buffer_filters=None, + imports=None, + future_imports=None, + source_encoding=None, + generate_magic_comment=True, + disable_unicode=False, + strict_undefined=False, + enable_loop=True, + reserved_names=frozenset()): + """Generate module source code given a parsetree node, + uri, and optional source filename""" + + # if on Py2K, push the "source_encoding" string to be + # a bytestring itself, as we will be embedding it into + # the generated source and we don't want to coerce the + # result into a unicode object, in "disable_unicode" mode + if not compat.py3k and isinstance(source_encoding, compat.text_type): + source_encoding = source_encoding.encode(source_encoding) + + buf = util.FastEncodingBuffer() + + printer = PythonPrinter(buf) + _GenerateRenderMethod(printer, + _CompileContext(uri, + filename, + default_filters, + buffer_filters, + imports, + future_imports, + source_encoding, + generate_magic_comment, + disable_unicode, + strict_undefined, + enable_loop, + reserved_names), + node) + return buf.getvalue() + + +class _CompileContext(object): + + def __init__(self, + uri, + filename, + default_filters, + buffer_filters, + imports, + future_imports, + source_encoding, + generate_magic_comment, + disable_unicode, + strict_undefined, + enable_loop, + reserved_names): + self.uri = uri + self.filename = filename + self.default_filters = default_filters + self.buffer_filters = buffer_filters + self.imports = imports + self.future_imports = future_imports + self.source_encoding = source_encoding + self.generate_magic_comment = 
generate_magic_comment + self.disable_unicode = disable_unicode + self.strict_undefined = strict_undefined + self.enable_loop = enable_loop + self.reserved_names = reserved_names + + +class _GenerateRenderMethod(object): + + """A template visitor object which generates the + full module source for a template. + + """ + + def __init__(self, printer, compiler, node): + self.printer = printer + self.compiler = compiler + self.node = node + self.identifier_stack = [None] + self.in_def = isinstance(node, (parsetree.DefTag, parsetree.BlockTag)) + + if self.in_def: + name = "render_%s" % node.funcname + args = node.get_argument_expressions() + filtered = len(node.filter_args.args) > 0 + buffered = eval(node.attributes.get('buffered', 'False')) + cached = eval(node.attributes.get('cached', 'False')) + defs = None + pagetag = None + if node.is_block and not node.is_anonymous: + args += ['**pageargs'] + else: + defs = self.write_toplevel() + pagetag = self.compiler.pagetag + name = "render_body" + if pagetag is not None: + args = pagetag.body_decl.get_argument_expressions() + if not pagetag.body_decl.kwargs: + args += ['**pageargs'] + cached = eval(pagetag.attributes.get('cached', 'False')) + self.compiler.enable_loop = self.compiler.enable_loop or eval( + pagetag.attributes.get( + 'enable_loop', 'False') + ) + else: + args = ['**pageargs'] + cached = False + buffered = filtered = False + if args is None: + args = ['context'] + else: + args = [a for a in ['context'] + args] + + self.write_render_callable( + pagetag or node, + name, args, + buffered, filtered, cached) + + if defs is not None: + for node in defs: + _GenerateRenderMethod(printer, compiler, node) + + if not self.in_def: + self.write_metadata_struct() + + def write_metadata_struct(self): + self.printer.source_map[self.printer.lineno] = \ + max(self.printer.source_map) + struct = { + "filename": self.compiler.filename, + "uri": self.compiler.uri, + "source_encoding": self.compiler.source_encoding, + "line_map": self.printer.source_map, + } + self.printer.writelines( + '"""', + '__M_BEGIN_METADATA', + compat.json.dumps(struct), + '__M_END_METADATA\n' + '"""' + ) + + @property + def identifiers(self): + return self.identifier_stack[-1] + + def write_toplevel(self): + """Traverse a template structure for module-level directives and + generate the start of module-level code. 
+ + """ + inherit = [] + namespaces = {} + module_code = [] + + self.compiler.pagetag = None + + class FindTopLevel(object): + + def visitInheritTag(s, node): + inherit.append(node) + + def visitNamespaceTag(s, node): + namespaces[node.name] = node + + def visitPageTag(s, node): + self.compiler.pagetag = node + + def visitCode(s, node): + if node.ismodule: + module_code.append(node) + + f = FindTopLevel() + for n in self.node.nodes: + n.accept_visitor(f) + + self.compiler.namespaces = namespaces + + module_ident = set() + for n in module_code: + module_ident = module_ident.union(n.declared_identifiers()) + + module_identifiers = _Identifiers(self.compiler) + module_identifiers.declared = module_ident + + # module-level names, python code + if self.compiler.generate_magic_comment and \ + self.compiler.source_encoding: + self.printer.writeline("# -*- coding:%s -*-" % + self.compiler.source_encoding) + + if self.compiler.future_imports: + self.printer.writeline("from __future__ import %s" % + (", ".join(self.compiler.future_imports),)) + self.printer.writeline("from mako import runtime, filters, cache") + self.printer.writeline("UNDEFINED = runtime.UNDEFINED") + self.printer.writeline("STOP_RENDERING = runtime.STOP_RENDERING") + self.printer.writeline("__M_dict_builtin = dict") + self.printer.writeline("__M_locals_builtin = locals") + self.printer.writeline("_magic_number = %r" % MAGIC_NUMBER) + self.printer.writeline("_modified_time = %r" % time.time()) + self.printer.writeline("_enable_loop = %r" % self.compiler.enable_loop) + self.printer.writeline( + "_template_filename = %r" % self.compiler.filename) + self.printer.writeline("_template_uri = %r" % self.compiler.uri) + self.printer.writeline( + "_source_encoding = %r" % self.compiler.source_encoding) + if self.compiler.imports: + buf = '' + for imp in self.compiler.imports: + buf += imp + "\n" + self.printer.writeline(imp) + impcode = ast.PythonCode( + buf, + source='', lineno=0, + pos=0, + filename='template defined imports') + else: + impcode = None + + main_identifiers = module_identifiers.branch(self.node) + module_identifiers.topleveldefs = \ + module_identifiers.topleveldefs.\ + union(main_identifiers.topleveldefs) + module_identifiers.declared.update(TOPLEVEL_DECLARED) + if impcode: + module_identifiers.declared.update(impcode.declared_identifiers) + + self.compiler.identifiers = module_identifiers + self.printer.writeline("_exports = %r" % + [n.name for n in + main_identifiers.topleveldefs.values()] + ) + self.printer.write_blanks(2) + + if len(module_code): + self.write_module_code(module_code) + + if len(inherit): + self.write_namespaces(namespaces) + self.write_inherit(inherit[-1]) + elif len(namespaces): + self.write_namespaces(namespaces) + + return list(main_identifiers.topleveldefs.values()) + + def write_render_callable(self, node, name, args, buffered, filtered, + cached): + """write a top-level render callable. 
+ + this could be the main render() method or that of a top-level def.""" + + if self.in_def: + decorator = node.decorator + if decorator: + self.printer.writeline( + "@runtime._decorate_toplevel(%s)" % decorator) + + self.printer.start_source(node.lineno) + self.printer.writelines( + "def %s(%s):" % (name, ','.join(args)), + # push new frame, assign current frame to __M_caller + "__M_caller = context.caller_stack._push_frame()", + "try:" + ) + if buffered or filtered or cached: + self.printer.writeline("context._push_buffer()") + + self.identifier_stack.append( + self.compiler.identifiers.branch(self.node)) + if (not self.in_def or self.node.is_block) and '**pageargs' in args: + self.identifier_stack[-1].argument_declared.add('pageargs') + + if not self.in_def and ( + len(self.identifiers.locally_assigned) > 0 or + len(self.identifiers.argument_declared) > 0 + ): + self.printer.writeline("__M_locals = __M_dict_builtin(%s)" % + ','.join([ + "%s=%s" % (x, x) for x in + self.identifiers.argument_declared + ])) + + self.write_variable_declares(self.identifiers, toplevel=True) + + for n in self.node.nodes: + n.accept_visitor(self) + + self.write_def_finish(self.node, buffered, filtered, cached) + self.printer.writeline(None) + self.printer.write_blanks(2) + if cached: + self.write_cache_decorator( + node, name, + args, buffered, + self.identifiers, toplevel=True) + + def write_module_code(self, module_code): + """write module-level template code, i.e. that which + is enclosed in <%! %> tags in the template.""" + for n in module_code: + self.printer.start_source(n.lineno) + self.printer.write_indented_block(n.text) + + def write_inherit(self, node): + """write the module-level inheritance-determination callable.""" + + self.printer.writelines( + "def _mako_inherit(template, context):", + "_mako_generate_namespaces(context)", + "return runtime._inherit_from(context, %s, _template_uri)" % + (node.parsed_attributes['file']), + None + ) + + def write_namespaces(self, namespaces): + """write the module-level namespace-generating callable.""" + self.printer.writelines( + "def _mako_get_namespace(context, name):", + "try:", + "return context.namespaces[(__name__, name)]", + "except KeyError:", + "_mako_generate_namespaces(context)", + "return context.namespaces[(__name__, name)]", + None, None + ) + self.printer.writeline("def _mako_generate_namespaces(context):") + + for node in namespaces.values(): + if 'import' in node.attributes: + self.compiler.has_ns_imports = True + self.printer.start_source(node.lineno) + if len(node.nodes): + self.printer.writeline("def make_namespace():") + export = [] + identifiers = self.compiler.identifiers.branch(node) + self.in_def = True + + class NSDefVisitor(object): + + def visitDefTag(s, node): + s.visitDefOrBase(node) + + def visitBlockTag(s, node): + s.visitDefOrBase(node) + + def visitDefOrBase(s, node): + if node.is_anonymous: + raise exceptions.CompileException( + "Can't put anonymous blocks inside " + "<%namespace>", + **node.exception_kwargs + ) + self.write_inline_def(node, identifiers, nested=False) + export.append(node.funcname) + vis = NSDefVisitor() + for n in node.nodes: + n.accept_visitor(vis) + self.printer.writeline("return [%s]" % (','.join(export))) + self.printer.writeline(None) + self.in_def = False + callable_name = "make_namespace()" + else: + callable_name = "None" + + if 'file' in node.parsed_attributes: + self.printer.writeline( + "ns = runtime.TemplateNamespace(%r," + " context._clean_inheritance_tokens()," + " templateuri=%s, 
callables=%s, " + " calling_uri=_template_uri)" % + ( + node.name, + node.parsed_attributes.get('file', 'None'), + callable_name, + ) + ) + elif 'module' in node.parsed_attributes: + self.printer.writeline( + "ns = runtime.ModuleNamespace(%r," + " context._clean_inheritance_tokens()," + " callables=%s, calling_uri=_template_uri," + " module=%s)" % + ( + node.name, + callable_name, + node.parsed_attributes.get( + 'module', 'None') + ) + ) + else: + self.printer.writeline( + "ns = runtime.Namespace(%r," + " context._clean_inheritance_tokens()," + " callables=%s, calling_uri=_template_uri)" % + ( + node.name, + callable_name, + ) + ) + if eval(node.attributes.get('inheritable', "False")): + self.printer.writeline("context['self'].%s = ns" % (node.name)) + + self.printer.writeline( + "context.namespaces[(__name__, %s)] = ns" % repr(node.name)) + self.printer.write_blanks(1) + if not len(namespaces): + self.printer.writeline("pass") + self.printer.writeline(None) + + def write_variable_declares(self, identifiers, toplevel=False, limit=None): + """write variable declarations at the top of a function. + + the variable declarations are in the form of callable + definitions for defs and/or name lookup within the + function's context argument. the names declared are based + on the names that are referenced in the function body, + which don't otherwise have any explicit assignment + operation. names that are assigned within the body are + assumed to be locally-scoped variables and are not + separately declared. + + for def callable definitions, if the def is a top-level + callable then a 'stub' callable is generated which wraps + the current Context into a closure. if the def is not + top-level, it is fully rendered as a local closure. + + """ + + # collection of all defs available to us in this scope + comp_idents = dict([(c.funcname, c) for c in identifiers.defs]) + to_write = set() + + # write "context.get()" for all variables we are going to + # need that arent in the namespace yet + to_write = to_write.union(identifiers.undeclared) + + # write closure functions for closures that we define + # right here + to_write = to_write.union( + [c.funcname for c in identifiers.closuredefs.values()]) + + # remove identifiers that are declared in the argument + # signature of the callable + to_write = to_write.difference(identifiers.argument_declared) + + # remove identifiers that we are going to assign to. + # in this way we mimic Python's behavior, + # i.e. assignment to a variable within a block + # means that variable is now a "locally declared" var, + # which cannot be referenced beforehand. + to_write = to_write.difference(identifiers.locally_declared) + + if self.compiler.enable_loop: + has_loop = "loop" in to_write + to_write.discard("loop") + else: + has_loop = False + + # if a limiting set was sent, constraint to those items in that list + # (this is used for the caching decorator) + if limit is not None: + to_write = to_write.intersection(limit) + + if toplevel and getattr(self.compiler, 'has_ns_imports', False): + self.printer.writeline("_import_ns = {}") + self.compiler.has_imports = True + for ident, ns in self.compiler.namespaces.items(): + if 'import' in ns.attributes: + self.printer.writeline( + "_mako_get_namespace(context, %r)." 
+ "_populate(_import_ns, %r)" % + ( + ident, + re.split(r'\s*,\s*', ns.attributes['import']) + )) + + if has_loop: + self.printer.writeline( + 'loop = __M_loop = runtime.LoopStack()' + ) + + for ident in to_write: + if ident in comp_idents: + comp = comp_idents[ident] + if comp.is_block: + if not comp.is_anonymous: + self.write_def_decl(comp, identifiers) + else: + self.write_inline_def(comp, identifiers, nested=True) + else: + if comp.is_root(): + self.write_def_decl(comp, identifiers) + else: + self.write_inline_def(comp, identifiers, nested=True) + + elif ident in self.compiler.namespaces: + self.printer.writeline( + "%s = _mako_get_namespace(context, %r)" % + (ident, ident) + ) + else: + if getattr(self.compiler, 'has_ns_imports', False): + if self.compiler.strict_undefined: + self.printer.writelines( + "%s = _import_ns.get(%r, UNDEFINED)" % + (ident, ident), + "if %s is UNDEFINED:" % ident, + "try:", + "%s = context[%r]" % (ident, ident), + "except KeyError:", + "raise NameError(\"'%s' is not defined\")" % + ident, + None, None + ) + else: + self.printer.writeline( + "%s = _import_ns.get" + "(%r, context.get(%r, UNDEFINED))" % + (ident, ident, ident)) + else: + if self.compiler.strict_undefined: + self.printer.writelines( + "try:", + "%s = context[%r]" % (ident, ident), + "except KeyError:", + "raise NameError(\"'%s' is not defined\")" % + ident, + None + ) + else: + self.printer.writeline( + "%s = context.get(%r, UNDEFINED)" % (ident, ident) + ) + + self.printer.writeline("__M_writer = context.writer()") + + def write_def_decl(self, node, identifiers): + """write a locally-available callable referencing a top-level def""" + funcname = node.funcname + namedecls = node.get_argument_expressions() + nameargs = node.get_argument_expressions(as_call=True) + + if not self.in_def and ( + len(self.identifiers.locally_assigned) > 0 or + len(self.identifiers.argument_declared) > 0): + nameargs.insert(0, 'context._locals(__M_locals)') + else: + nameargs.insert(0, 'context') + self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls))) + self.printer.writeline( + "return render_%s(%s)" % (funcname, ",".join(nameargs))) + self.printer.writeline(None) + + def write_inline_def(self, node, identifiers, nested): + """write a locally-available def callable inside an enclosing def.""" + + namedecls = node.get_argument_expressions() + + decorator = node.decorator + if decorator: + self.printer.writeline( + "@runtime._decorate_inline(context, %s)" % decorator) + self.printer.writeline( + "def %s(%s):" % (node.funcname, ",".join(namedecls))) + filtered = len(node.filter_args.args) > 0 + buffered = eval(node.attributes.get('buffered', 'False')) + cached = eval(node.attributes.get('cached', 'False')) + self.printer.writelines( + # push new frame, assign current frame to __M_caller + "__M_caller = context.caller_stack._push_frame()", + "try:" + ) + if buffered or filtered or cached: + self.printer.writelines( + "context._push_buffer()", + ) + + identifiers = identifiers.branch(node, nested=nested) + + self.write_variable_declares(identifiers) + + self.identifier_stack.append(identifiers) + for n in node.nodes: + n.accept_visitor(self) + self.identifier_stack.pop() + + self.write_def_finish(node, buffered, filtered, cached) + self.printer.writeline(None) + if cached: + self.write_cache_decorator(node, node.funcname, + namedecls, False, identifiers, + inline=True, toplevel=False) + + def write_def_finish(self, node, buffered, filtered, cached, + callstack=True): + """write the end section of a 
rendering function, either outermost or + inline. + + this takes into account if the rendering function was filtered, + buffered, etc. and closes the corresponding try: block if any, and + writes code to retrieve captured content, apply filters, send proper + return value.""" + + if not buffered and not cached and not filtered: + self.printer.writeline("return ''") + if callstack: + self.printer.writelines( + "finally:", + "context.caller_stack._pop_frame()", + None + ) + + if buffered or filtered or cached: + if buffered or cached: + # in a caching scenario, don't try to get a writer + # from the context after popping; assume the caching + # implemenation might be using a context with no + # extra buffers + self.printer.writelines( + "finally:", + "__M_buf = context._pop_buffer()" + ) + else: + self.printer.writelines( + "finally:", + "__M_buf, __M_writer = context._pop_buffer_and_writer()" + ) + + if callstack: + self.printer.writeline("context.caller_stack._pop_frame()") + + s = "__M_buf.getvalue()" + if filtered: + s = self.create_filter_callable(node.filter_args.args, s, + False) + self.printer.writeline(None) + if buffered and not cached: + s = self.create_filter_callable(self.compiler.buffer_filters, + s, False) + if buffered or cached: + self.printer.writeline("return %s" % s) + else: + self.printer.writelines( + "__M_writer(%s)" % s, + "return ''" + ) + + def write_cache_decorator(self, node_or_pagetag, name, + args, buffered, identifiers, + inline=False, toplevel=False): + """write a post-function decorator to replace a rendering + callable with a cached version of itself.""" + + self.printer.writeline("__M_%s = %s" % (name, name)) + cachekey = node_or_pagetag.parsed_attributes.get('cache_key', + repr(name)) + + cache_args = {} + if self.compiler.pagetag is not None: + cache_args.update( + ( + pa[6:], + self.compiler.pagetag.parsed_attributes[pa] + ) + for pa in self.compiler.pagetag.parsed_attributes + if pa.startswith('cache_') and pa != 'cache_key' + ) + cache_args.update( + ( + pa[6:], + node_or_pagetag.parsed_attributes[pa] + ) for pa in node_or_pagetag.parsed_attributes + if pa.startswith('cache_') and pa != 'cache_key' + ) + if 'timeout' in cache_args: + cache_args['timeout'] = int(eval(cache_args['timeout'])) + + self.printer.writeline("def %s(%s):" % (name, ','.join(args))) + + # form "arg1, arg2, arg3=arg3, arg4=arg4", etc. + pass_args = [ + "%s=%s" % ((a.split('=')[0],) * 2) if '=' in a else a + for a in args + ] + + self.write_variable_declares( + identifiers, + toplevel=toplevel, + limit=node_or_pagetag.undeclared_identifiers() + ) + if buffered: + s = "context.get('local')."\ + "cache._ctx_get_or_create("\ + "%s, lambda:__M_%s(%s), context, %s__M_defname=%r)" % ( + cachekey, name, ','.join(pass_args), + ''.join(["%s=%s, " % (k, v) + for k, v in cache_args.items()]), + name + ) + # apply buffer_filters + s = self.create_filter_callable(self.compiler.buffer_filters, s, + False) + self.printer.writelines("return " + s, None) + else: + self.printer.writelines( + "__M_writer(context.get('local')." 
+ "cache._ctx_get_or_create(" + "%s, lambda:__M_%s(%s), context, %s__M_defname=%r))" % + ( + cachekey, name, ','.join(pass_args), + ''.join(["%s=%s, " % (k, v) + for k, v in cache_args.items()]), + name, + ), + "return ''", + None + ) + + def create_filter_callable(self, args, target, is_expression): + """write a filter-applying expression based on the filters + present in the given filter names, adjusting for the global + 'default' filter aliases as needed.""" + + def locate_encode(name): + if re.match(r'decode\..+', name): + return "filters." + name + elif self.compiler.disable_unicode: + return filters.NON_UNICODE_ESCAPES.get(name, name) + else: + return filters.DEFAULT_ESCAPES.get(name, name) + + if 'n' not in args: + if is_expression: + if self.compiler.pagetag: + args = self.compiler.pagetag.filter_args.args + args + if self.compiler.default_filters: + args = self.compiler.default_filters + args + for e in args: + # if filter given as a function, get just the identifier portion + if e == 'n': + continue + m = re.match(r'(.+?)(\(.*\))', e) + if m: + ident, fargs = m.group(1, 2) + f = locate_encode(ident) + e = f + fargs + else: + e = locate_encode(e) + assert e is not None + target = "%s(%s)" % (e, target) + return target + + def visitExpression(self, node): + self.printer.start_source(node.lineno) + if len(node.escapes) or \ + ( + self.compiler.pagetag is not None and + len(self.compiler.pagetag.filter_args.args) + ) or \ + len(self.compiler.default_filters): + + s = self.create_filter_callable(node.escapes_code.args, + "%s" % node.text, True) + self.printer.writeline("__M_writer(%s)" % s) + else: + self.printer.writeline("__M_writer(%s)" % node.text) + + def visitControlLine(self, node): + if node.isend: + self.printer.writeline(None) + if node.has_loop_context: + self.printer.writeline('finally:') + self.printer.writeline("loop = __M_loop._exit()") + self.printer.writeline(None) + else: + self.printer.start_source(node.lineno) + if self.compiler.enable_loop and node.keyword == 'for': + text = mangle_mako_loop(node, self.printer) + else: + text = node.text + self.printer.writeline(text) + children = node.get_children() + # this covers the three situations where we want to insert a pass: + # 1) a ternary control line with no children, + # 2) a primary control line with nothing but its own ternary + # and end control lines, and + # 3) any control line with no content other than comments + if not children or ( + compat.all(isinstance(c, (parsetree.Comment, + parsetree.ControlLine)) + for c in children) and + compat.all((node.is_ternary(c.keyword) or c.isend) + for c in children + if isinstance(c, parsetree.ControlLine))): + self.printer.writeline("pass") + + def visitText(self, node): + self.printer.start_source(node.lineno) + self.printer.writeline("__M_writer(%s)" % repr(node.content)) + + def visitTextTag(self, node): + filtered = len(node.filter_args.args) > 0 + if filtered: + self.printer.writelines( + "__M_writer = context._push_writer()", + "try:", + ) + for n in node.nodes: + n.accept_visitor(self) + if filtered: + self.printer.writelines( + "finally:", + "__M_buf, __M_writer = context._pop_buffer_and_writer()", + "__M_writer(%s)" % + self.create_filter_callable( + node.filter_args.args, + "__M_buf.getvalue()", + False), + None + ) + + def visitCode(self, node): + if not node.ismodule: + self.printer.start_source(node.lineno) + self.printer.write_indented_block(node.text) + + if not self.in_def and len(self.identifiers.locally_assigned) > 0: + # if we are the "template" def, 
fudge locally + # declared/modified variables into the "__M_locals" dictionary, + # which is used for def calls within the same template, + # to simulate "enclosing scope" + self.printer.writeline( + '__M_locals_builtin_stored = __M_locals_builtin()') + self.printer.writeline( + '__M_locals.update(__M_dict_builtin([(__M_key,' + ' __M_locals_builtin_stored[__M_key]) for __M_key in' + ' [%s] if __M_key in __M_locals_builtin_stored]))' % + ','.join([repr(x) for x in node.declared_identifiers()])) + + def visitIncludeTag(self, node): + self.printer.start_source(node.lineno) + args = node.attributes.get('args') + if args: + self.printer.writeline( + "runtime._include_file(context, %s, _template_uri, %s)" % + (node.parsed_attributes['file'], args)) + else: + self.printer.writeline( + "runtime._include_file(context, %s, _template_uri)" % + (node.parsed_attributes['file'])) + + def visitNamespaceTag(self, node): + pass + + def visitDefTag(self, node): + pass + + def visitBlockTag(self, node): + if node.is_anonymous: + self.printer.writeline("%s()" % node.funcname) + else: + nameargs = node.get_argument_expressions(as_call=True) + nameargs += ['**pageargs'] + self.printer.writeline( + "if 'parent' not in context._data or " + "not hasattr(context._data['parent'], '%s'):" + % node.funcname) + self.printer.writeline( + "context['self'].%s(%s)" % (node.funcname, ",".join(nameargs))) + self.printer.writeline("\n") + + def visitCallNamespaceTag(self, node): + # TODO: we can put namespace-specific checks here, such + # as ensure the given namespace will be imported, + # pre-import the namespace, etc. + self.visitCallTag(node) + + def visitCallTag(self, node): + self.printer.writeline("def ccall(caller):") + export = ['body'] + callable_identifiers = self.identifiers.branch(node, nested=True) + body_identifiers = callable_identifiers.branch(node, nested=False) + # we want the 'caller' passed to ccall to be used + # for the body() function, but for other non-body() + # <%def>s within <%call> we want the current caller + # off the call stack (if any) + body_identifiers.add_declared('caller') + + self.identifier_stack.append(body_identifiers) + + class DefVisitor(object): + + def visitDefTag(s, node): + s.visitDefOrBase(node) + + def visitBlockTag(s, node): + s.visitDefOrBase(node) + + def visitDefOrBase(s, node): + self.write_inline_def(node, callable_identifiers, nested=False) + if not node.is_anonymous: + export.append(node.funcname) + # remove defs that are within the <%call> from the + # "closuredefs" defined in the body, so they dont render twice + if node.funcname in body_identifiers.closuredefs: + del body_identifiers.closuredefs[node.funcname] + + vis = DefVisitor() + for n in node.nodes: + n.accept_visitor(vis) + self.identifier_stack.pop() + + bodyargs = node.body_decl.get_argument_expressions() + self.printer.writeline("def body(%s):" % ','.join(bodyargs)) + + # TODO: figure out best way to specify + # buffering/nonbuffering (at call time would be better) + buffered = False + if buffered: + self.printer.writelines( + "context._push_buffer()", + "try:" + ) + self.write_variable_declares(body_identifiers) + self.identifier_stack.append(body_identifiers) + + for n in node.nodes: + n.accept_visitor(self) + self.identifier_stack.pop() + + self.write_def_finish(node, buffered, False, False, callstack=False) + self.printer.writelines( + None, + "return [%s]" % (','.join(export)), + None + ) + + self.printer.writelines( + # push on caller for nested call + "context.caller_stack.nextcaller = " + 
"runtime.Namespace('caller', context, " + "callables=ccall(__M_caller))", + "try:") + self.printer.start_source(node.lineno) + self.printer.writelines( + "__M_writer(%s)" % self.create_filter_callable( + [], node.expression, True), + "finally:", + "context.caller_stack.nextcaller = None", + None + ) + + +class _Identifiers(object): + + """tracks the status of identifier names as template code is rendered.""" + + def __init__(self, compiler, node=None, parent=None, nested=False): + if parent is not None: + # if we are the branch created in write_namespaces(), + # we don't share any context from the main body(). + if isinstance(node, parsetree.NamespaceTag): + self.declared = set() + self.topleveldefs = util.SetLikeDict() + else: + # things that have already been declared + # in an enclosing namespace (i.e. names we can just use) + self.declared = set(parent.declared).\ + union([c.name for c in parent.closuredefs.values()]).\ + union(parent.locally_declared).\ + union(parent.argument_declared) + + # if these identifiers correspond to a "nested" + # scope, it means whatever the parent identifiers + # had as undeclared will have been declared by that parent, + # and therefore we have them in our scope. + if nested: + self.declared = self.declared.union(parent.undeclared) + + # top level defs that are available + self.topleveldefs = util.SetLikeDict(**parent.topleveldefs) + else: + self.declared = set() + self.topleveldefs = util.SetLikeDict() + + self.compiler = compiler + + # things within this level that are referenced before they + # are declared (e.g. assigned to) + self.undeclared = set() + + # things that are declared locally. some of these things + # could be in the "undeclared" list as well if they are + # referenced before declared + self.locally_declared = set() + + # assignments made in explicit python blocks. + # these will be propagated to + # the context of local def calls. 
+ self.locally_assigned = set() + + # things that are declared in the argument + # signature of the def callable + self.argument_declared = set() + + # closure defs that are defined in this level + self.closuredefs = util.SetLikeDict() + + self.node = node + + if node is not None: + node.accept_visitor(self) + + illegal_names = self.compiler.reserved_names.intersection( + self.locally_declared) + if illegal_names: + raise exceptions.NameConflictError( + "Reserved words declared in template: %s" % + ", ".join(illegal_names)) + + def branch(self, node, **kwargs): + """create a new Identifiers for a new Node, with + this Identifiers as the parent.""" + + return _Identifiers(self.compiler, node, self, **kwargs) + + @property + def defs(self): + return set(self.topleveldefs.union(self.closuredefs).values()) + + def __repr__(self): + return "Identifiers(declared=%r, locally_declared=%r, "\ + "undeclared=%r, topleveldefs=%r, closuredefs=%r, "\ + "argumentdeclared=%r)" %\ + ( + list(self.declared), + list(self.locally_declared), + list(self.undeclared), + [c.name for c in self.topleveldefs.values()], + [c.name for c in self.closuredefs.values()], + self.argument_declared) + + def check_declared(self, node): + """update the state of this Identifiers with the undeclared + and declared identifiers of the given node.""" + + for ident in node.undeclared_identifiers(): + if ident != 'context' and\ + ident not in self.declared.union(self.locally_declared): + self.undeclared.add(ident) + for ident in node.declared_identifiers(): + self.locally_declared.add(ident) + + def add_declared(self, ident): + self.declared.add(ident) + if ident in self.undeclared: + self.undeclared.remove(ident) + + def visitExpression(self, node): + self.check_declared(node) + + def visitControlLine(self, node): + self.check_declared(node) + + def visitCode(self, node): + if not node.ismodule: + self.check_declared(node) + self.locally_assigned = self.locally_assigned.union( + node.declared_identifiers()) + + def visitNamespaceTag(self, node): + # only traverse into the sub-elements of a + # <%namespace> tag if we are the branch created in + # write_namespaces() + if self.node is node: + for n in node.nodes: + n.accept_visitor(self) + + def _check_name_exists(self, collection, node): + existing = collection.get(node.funcname) + collection[node.funcname] = node + if existing is not None and \ + existing is not node and \ + (node.is_block or existing.is_block): + raise exceptions.CompileException( + "%%def or %%block named '%s' already " + "exists in this template." 
% + node.funcname, **node.exception_kwargs) + + def visitDefTag(self, node): + if node.is_root() and not node.is_anonymous: + self._check_name_exists(self.topleveldefs, node) + elif node is not self.node: + self._check_name_exists(self.closuredefs, node) + + for ident in node.undeclared_identifiers(): + if ident != 'context' and \ + ident not in self.declared.union(self.locally_declared): + self.undeclared.add(ident) + + # visit defs only one level deep + if node is self.node: + for ident in node.declared_identifiers(): + self.argument_declared.add(ident) + + for n in node.nodes: + n.accept_visitor(self) + + def visitBlockTag(self, node): + if node is not self.node and not node.is_anonymous: + + if isinstance(self.node, parsetree.DefTag): + raise exceptions.CompileException( + "Named block '%s' not allowed inside of def '%s'" + % (node.name, self.node.name), **node.exception_kwargs) + elif isinstance(self.node, + (parsetree.CallTag, parsetree.CallNamespaceTag)): + raise exceptions.CompileException( + "Named block '%s' not allowed inside of <%%call> tag" + % (node.name, ), **node.exception_kwargs) + + for ident in node.undeclared_identifiers(): + if ident != 'context' and \ + ident not in self.declared.union(self.locally_declared): + self.undeclared.add(ident) + + if not node.is_anonymous: + self._check_name_exists(self.topleveldefs, node) + self.undeclared.add(node.funcname) + elif node is not self.node: + self._check_name_exists(self.closuredefs, node) + for ident in node.declared_identifiers(): + self.argument_declared.add(ident) + for n in node.nodes: + n.accept_visitor(self) + + def visitTextTag(self, node): + for ident in node.undeclared_identifiers(): + if ident != 'context' and \ + ident not in self.declared.union(self.locally_declared): + self.undeclared.add(ident) + + def visitIncludeTag(self, node): + self.check_declared(node) + + def visitPageTag(self, node): + for ident in node.declared_identifiers(): + self.argument_declared.add(ident) + self.check_declared(node) + + def visitCallNamespaceTag(self, node): + self.visitCallTag(node) + + def visitCallTag(self, node): + if node is self.node: + for ident in node.undeclared_identifiers(): + if ident != 'context' and \ + ident not in self.declared.union( + self.locally_declared): + self.undeclared.add(ident) + for ident in node.declared_identifiers(): + self.argument_declared.add(ident) + for n in node.nodes: + n.accept_visitor(self) + else: + for ident in node.undeclared_identifiers(): + if ident != 'context' and \ + ident not in self.declared.union( + self.locally_declared): + self.undeclared.add(ident) + + +_FOR_LOOP = re.compile( + r'^for\s+((?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*' + r'(?:\s*,\s*(?:[A-Za-z_][A-Za-z0-9_]*),??)*\s*(?:\)?))\s+in\s+(.*):' +) + + +def mangle_mako_loop(node, printer): + """converts a for loop into a context manager wrapped around a for loop + when access to the `loop` variable has been detected in the for loop body + """ + loop_variable = LoopVariable() + node.accept_visitor(loop_variable) + if loop_variable.detected: + node.nodes[-1].has_loop_context = True + match = _FOR_LOOP.match(node.text) + if match: + printer.writelines( + 'loop = __M_loop._enter(%s)' % match.group(2), + 'try:' + # 'with __M_loop(%s) as loop:' % match.group(2) + ) + text = 'for %s in loop:' % match.group(1) + else: + raise SyntaxError("Couldn't apply loop context: %s" % node.text) + else: + text = node.text + return text + + +class LoopVariable(object): + + """A node visitor which looks for the name 'loop' within undeclared + 
identifiers.""" + + def __init__(self): + self.detected = False + + def _loop_reference_detected(self, node): + if 'loop' in node.undeclared_identifiers(): + self.detected = True + else: + for n in node.get_children(): + n.accept_visitor(self) + + def visitControlLine(self, node): + self._loop_reference_detected(node) + + def visitCode(self, node): + self._loop_reference_detected(node) + + def visitExpression(self, node): + self._loop_reference_detected(node) diff --git a/server/www/packages/packages-windows/x86/mako/compat.py b/server/www/packages/packages-windows/x86/mako/compat.py new file mode 100644 index 0000000..a2ab243 --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/compat.py @@ -0,0 +1,201 @@ +import sys +import time + +py3k = sys.version_info >= (3, 0) +py33 = sys.version_info >= (3, 3) +py2k = sys.version_info < (3,) +py26 = sys.version_info >= (2, 6) +py27 = sys.version_info >= (2, 7) +jython = sys.platform.startswith('java') +win32 = sys.platform.startswith('win') +pypy = hasattr(sys, 'pypy_version_info') + +if py3k: + # create a "getargspec" from getfullargspec(), which is not deprecated + # in Py3K; getargspec() has started to emit warnings as of Py3.5. + # As of Py3.4, now they are trying to move from getfullargspec() + # to "signature()", but getfullargspec() is not deprecated, so stick + # with that for now. + + import collections + ArgSpec = collections.namedtuple( + "ArgSpec", + ["args", "varargs", "keywords", "defaults"]) + from inspect import getfullargspec as inspect_getfullargspec + + def inspect_getargspec(func): + return ArgSpec( + *inspect_getfullargspec(func)[0:4] + ) +else: + from inspect import getargspec as inspect_getargspec # noqa + + +if py3k: + from io import StringIO + import builtins as compat_builtins + from urllib.parse import quote_plus, unquote_plus + from html.entities import codepoint2name, name2codepoint + string_types = str, + binary_type = bytes + text_type = str + + from io import BytesIO as byte_buffer + + def u(s): + return s + + def b(s): + return s.encode("latin-1") + + def octal(lit): + return eval("0o" + lit) + +else: + import __builtin__ as compat_builtins # noqa + try: + from cStringIO import StringIO + except: + from StringIO import StringIO + + byte_buffer = StringIO + + from urllib import quote_plus, unquote_plus # noqa + from htmlentitydefs import codepoint2name, name2codepoint # noqa + string_types = basestring, # noqa + binary_type = str + text_type = unicode # noqa + + def u(s): + return unicode(s, "utf-8") # noqa + + def b(s): + return s + + def octal(lit): + return eval("0" + lit) + + +if py33: + from importlib import machinery + + def load_module(module_id, path): + return machinery.SourceFileLoader(module_id, path).load_module() +else: + import imp + + def load_module(module_id, path): + fp = open(path, 'rb') + try: + return imp.load_source(module_id, path, fp) + finally: + fp.close() + + +if py3k: + def reraise(tp, value, tb=None, cause=None): + if cause is not None: + value.__cause__ = cause + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value +else: + exec("def reraise(tp, value, tb=None, cause=None):\n" + " raise tp, value, tb\n") + + +def exception_as(): + return sys.exc_info()[1] + +try: + import threading + if py3k: + import _thread as thread + else: + import thread +except ImportError: + import dummy_threading as threading # noqa + if py3k: + import _dummy_thread as thread + else: + import dummy_thread as thread # noqa + +if win32 or jython: + time_func = time.clock 
+else: + time_func = time.time + +try: + from functools import partial +except: + def partial(func, *args, **keywords): + def newfunc(*fargs, **fkeywords): + newkeywords = keywords.copy() + newkeywords.update(fkeywords) + return func(*(args + fargs), **newkeywords) + return newfunc + + +all = all +import json # noqa + + +def exception_name(exc): + return exc.__class__.__name__ + +try: + from inspect import CO_VARKEYWORDS, CO_VARARGS + + def inspect_func_args(fn): + if py3k: + co = fn.__code__ + else: + co = fn.func_code + + nargs = co.co_argcount + names = co.co_varnames + args = list(names[:nargs]) + + varargs = None + if co.co_flags & CO_VARARGS: + varargs = co.co_varnames[nargs] + nargs = nargs + 1 + varkw = None + if co.co_flags & CO_VARKEYWORDS: + varkw = co.co_varnames[nargs] + + if py3k: + return args, varargs, varkw, fn.__defaults__ + else: + return args, varargs, varkw, fn.func_defaults +except ImportError: + import inspect + + def inspect_func_args(fn): + return inspect.getargspec(fn) + +if py3k: + def callable(fn): + return hasattr(fn, '__call__') +else: + callable = callable + + +################################################ +# cross-compatible metaclass implementation +# Copyright (c) 2010-2012 Benjamin Peterson +def with_metaclass(meta, base=object): + """Create a base class with a metaclass.""" + return meta("%sBase" % meta.__name__, (base,), {}) +################################################ + + +def arg_stringname(func_arg): + """Gets the string name of a kwarg or vararg + In Python3.4 a function's args are + of _ast.arg type not _ast.name + """ + if hasattr(func_arg, 'arg'): + return func_arg.arg + else: + return str(func_arg) diff --git a/server/www/packages/packages-windows/x86/mako/exceptions.py b/server/www/packages/packages-windows/x86/mako/exceptions.py new file mode 100644 index 0000000..cb6fb3f --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/exceptions.py @@ -0,0 +1,394 @@ +# mako/exceptions.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""exception classes""" + +import traceback +import sys +from mako import util, compat + + +class MakoException(Exception): + pass + + +class RuntimeException(MakoException): + pass + + +def _format_filepos(lineno, pos, filename): + if filename is None: + return " at line: %d char: %d" % (lineno, pos) + else: + return " in file '%s' at line: %d char: %d" % (filename, lineno, pos) + + +class CompileException(MakoException): + + def __init__(self, message, source, lineno, pos, filename): + MakoException.__init__( + self, + message + _format_filepos(lineno, pos, filename)) + self.lineno = lineno + self.pos = pos + self.filename = filename + self.source = source + + +class SyntaxException(MakoException): + + def __init__(self, message, source, lineno, pos, filename): + MakoException.__init__( + self, + message + _format_filepos(lineno, pos, filename)) + self.lineno = lineno + self.pos = pos + self.filename = filename + self.source = source + + +class UnsupportedError(MakoException): + + """raised when a retired feature is used.""" + + +class NameConflictError(MakoException): + + """raised when a reserved word is used inappropriately""" + + +class TemplateLookupException(MakoException): + pass + + +class TopLevelLookupException(TemplateLookupException): + pass + + +class RichTraceback(object): + + """Pull the current exception from the ``sys`` traceback and extracts + 
Mako-specific template information. + + See the usage examples in :ref:`handling_exceptions`. + + """ + + def __init__(self, error=None, traceback=None): + self.source, self.lineno = "", 0 + + if error is None or traceback is None: + t, value, tback = sys.exc_info() + + if error is None: + error = value or t + + if traceback is None: + traceback = tback + + self.error = error + self.records = self._init(traceback) + + if isinstance(self.error, (CompileException, SyntaxException)): + self.source = self.error.source + self.lineno = self.error.lineno + self._has_source = True + + self._init_message() + + @property + def errorname(self): + return compat.exception_name(self.error) + + def _init_message(self): + """Find a unicode representation of self.error""" + try: + self.message = compat.text_type(self.error) + except UnicodeError: + try: + self.message = str(self.error) + except UnicodeEncodeError: + # Fallback to args as neither unicode nor + # str(Exception(u'\xe6')) work in Python < 2.6 + self.message = self.error.args[0] + if not isinstance(self.message, compat.text_type): + self.message = compat.text_type(self.message, 'ascii', 'replace') + + def _get_reformatted_records(self, records): + for rec in records: + if rec[6] is not None: + yield (rec[4], rec[5], rec[2], rec[6]) + else: + yield tuple(rec[0:4]) + + @property + def traceback(self): + """Return a list of 4-tuple traceback records (i.e. normal python + format) with template-corresponding lines remapped to the originating + template. + + """ + return list(self._get_reformatted_records(self.records)) + + @property + def reverse_records(self): + return reversed(self.records) + + @property + def reverse_traceback(self): + """Return the same data as traceback, except in reverse order. + """ + + return list(self._get_reformatted_records(self.reverse_records)) + + def _init(self, trcback): + """format a traceback from sys.exc_info() into 7-item tuples, + containing the regular four traceback tuple items, plus the original + template filename, the line number adjusted relative to the template + source, and code line from that line number of the template.""" + + import mako.template + mods = {} + rawrecords = traceback.extract_tb(trcback) + new_trcback = [] + for filename, lineno, function, line in rawrecords: + if not line: + line = '' + try: + (line_map, template_lines) = mods[filename] + except KeyError: + try: + info = mako.template._get_module_info(filename) + module_source = info.code + template_source = info.source + template_filename = info.template_filename or filename + except KeyError: + # A normal .py file (not a Template) + if not compat.py3k: + try: + fp = open(filename, 'rb') + encoding = util.parse_encoding(fp) + fp.close() + except IOError: + encoding = None + if encoding: + line = line.decode(encoding) + else: + line = line.decode('ascii', 'replace') + new_trcback.append((filename, lineno, function, line, + None, None, None, None)) + continue + + template_ln = 1 + + source_map = mako.template.ModuleInfo.\ + get_module_source_metadata( + module_source, full_line_map=True) + line_map = source_map['full_line_map'] + + template_lines = [line_ for line_ in + template_source.split("\n")] + mods[filename] = (line_map, template_lines) + + template_ln = line_map[lineno - 1] + + if template_ln <= len(template_lines): + template_line = template_lines[template_ln - 1] + else: + template_line = None + new_trcback.append((filename, lineno, function, + line, template_filename, template_ln, + template_line, template_source)) + if not 
self.source: + for l in range(len(new_trcback) - 1, 0, -1): + if new_trcback[l][5]: + self.source = new_trcback[l][7] + self.lineno = new_trcback[l][5] + break + else: + if new_trcback: + try: + # A normal .py file (not a Template) + fp = open(new_trcback[-1][0], 'rb') + encoding = util.parse_encoding(fp) + fp.seek(0) + self.source = fp.read() + fp.close() + if encoding: + self.source = self.source.decode(encoding) + except IOError: + self.source = '' + self.lineno = new_trcback[-1][1] + return new_trcback + + +def text_error_template(lookup=None): + """Provides a template that renders a stack trace in a similar format to + the Python interpreter, substituting source template filenames, line + numbers and code for that of the originating source template, as + applicable. + + """ + import mako.template + return mako.template.Template(r""" +<%page args="error=None, traceback=None"/> +<%! + from mako.exceptions import RichTraceback +%>\ +<% + tback = RichTraceback(error=error, traceback=traceback) +%>\ +Traceback (most recent call last): +% for (filename, lineno, function, line) in tback.traceback: + File "${filename}", line ${lineno}, in ${function or '?'} + ${line | trim} +% endfor +${tback.errorname}: ${tback.message} +""") + + +def _install_pygments(): + global syntax_highlight, pygments_html_formatter + from mako.ext.pygmentplugin import syntax_highlight # noqa + from mako.ext.pygmentplugin import pygments_html_formatter # noqa + + +def _install_fallback(): + global syntax_highlight, pygments_html_formatter + from mako.filters import html_escape + pygments_html_formatter = None + + def syntax_highlight(filename='', language=None): + return html_escape + + +def _install_highlighting(): + try: + _install_pygments() + except ImportError: + _install_fallback() +_install_highlighting() + + +def html_error_template(): + """Provides a template that renders a stack trace in an HTML format, + providing an excerpt of code as well as substituting source template + filenames, line numbers and code for that of the originating source + template, as applicable. + + The template's default ``encoding_errors`` value is + ``'htmlentityreplace'``. The template has two options. With the + ``full`` option disabled, only a section of an HTML document is + returned. With the ``css`` option disabled, the default stylesheet + won't be included. + + """ + import mako.template + return mako.template.Template(r""" +<%! + from mako.exceptions import RichTraceback, syntax_highlight,\ + pygments_html_formatter +%> +<%page args="full=True, css=True, error=None, traceback=None"/> +% if full: + + + Mako Runtime Error +% endif +% if css: + +% endif +% if full: + + +% endif + +
+<h2>Error !</h2>
+<%
+    tback = RichTraceback(error=error, traceback=traceback)
+    src = tback.source
+    line = tback.lineno
+    if src:
+        lines = src.split('\n')
+    else:
+        lines = None
+%>
+<h3>${tback.errorname}: ${tback.message|h}</h3>
+
+% if lines:
+    <div class="sample">
+    <div class="nonhighlight">
+% for index in range(max(0, line-4),min(len(lines), line+5)):
+    <%
+       if pygments_html_formatter:
+           pygments_html_formatter.linenostart = index + 1
+    %>
+    % if index + 1 == line:
+    <%
+       if pygments_html_formatter:
+           old_cssclass = pygments_html_formatter.cssclass
+           pygments_html_formatter.cssclass = 'error ' + old_cssclass
+    %>
+        ${lines[index] | syntax_highlight(language='mako')}
+    <%
+       if pygments_html_formatter:
+           pygments_html_formatter.cssclass = old_cssclass
+    %>
+    % else:
+        ${lines[index] | syntax_highlight(language='mako')}
+    % endif
+% endfor
+    </div>
+    </div>
+% endif
+
+<div class="stacktrace">
+% for (filename, lineno, function, line) in tback.reverse_traceback:
+    <div class="location">${filename}, line ${lineno}:</div>
+    <div class="nonhighlight">
+    <%
+       if pygments_html_formatter:
+           pygments_html_formatter.linenostart = lineno
+    %>
+      <div class="sourceline">${line | syntax_highlight(filename)}</div>
+    </div>
+% endfor
+</div>
+ +% if full: + + +% endif +""", output_encoding=sys.getdefaultencoding(), + encoding_errors='htmlentityreplace') diff --git a/server/www/packages/packages-windows/x86/mako/ext/__init__.py b/server/www/packages/packages-windows/x86/mako/ext/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/server/www/packages/packages-windows/x86/mako/ext/autohandler.py b/server/www/packages/packages-windows/x86/mako/ext/autohandler.py new file mode 100644 index 0000000..9d1c911 --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/ext/autohandler.py @@ -0,0 +1,68 @@ +# ext/autohandler.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""adds autohandler functionality to Mako templates. + +requires that the TemplateLookup class is used with templates. + +usage: + +<%! + from mako.ext.autohandler import autohandler +%> +<%inherit file="${autohandler(template, context)}"/> + + +or with custom autohandler filename: + +<%! + from mako.ext.autohandler import autohandler +%> +<%inherit file="${autohandler(template, context, name='somefilename')}"/> + +""" + +import posixpath +import os +import re + + +def autohandler(template, context, name='autohandler'): + lookup = context.lookup + _template_uri = template.module._template_uri + if not lookup.filesystem_checks: + try: + return lookup._uri_cache[(autohandler, _template_uri, name)] + except KeyError: + pass + + tokens = re.findall(r'([^/]+)', posixpath.dirname(_template_uri)) + [name] + while len(tokens): + path = '/' + '/'.join(tokens) + if path != _template_uri and _file_exists(lookup, path): + if not lookup.filesystem_checks: + return lookup._uri_cache.setdefault( + (autohandler, _template_uri, name), path) + else: + return path + if len(tokens) == 1: + break + tokens[-2:] = [name] + + if not lookup.filesystem_checks: + return lookup._uri_cache.setdefault( + (autohandler, _template_uri, name), None) + else: + return None + + +def _file_exists(lookup, path): + psub = re.sub(r'^/', '', path) + for d in lookup.directories: + if os.path.exists(d + '/' + psub): + return True + else: + return False diff --git a/server/www/packages/packages-windows/x86/mako/ext/babelplugin.py b/server/www/packages/packages-windows/x86/mako/ext/babelplugin.py new file mode 100644 index 0000000..0b5e84f --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/ext/babelplugin.py @@ -0,0 +1,50 @@ +# ext/babelplugin.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""gettext message extraction via Babel: http://babel.edgewall.org/""" +from babel.messages.extract import extract_python +from mako.ext.extract import MessageExtractor + + +class BabelMakoExtractor(MessageExtractor): + + def __init__(self, keywords, comment_tags, options): + self.keywords = keywords + self.options = options + self.config = { + 'comment-tags': u' '.join(comment_tags), + 'encoding': options.get('input_encoding', + options.get('encoding', None)), + } + super(BabelMakoExtractor, self).__init__() + + def __call__(self, fileobj): + return self.process_file(fileobj) + + def process_python(self, code, code_lineno, translator_strings): + comment_tags = self.config['comment-tags'] + for lineno, funcname, messages, python_translator_comments \ + in extract_python(code, + self.keywords, 
comment_tags, self.options): + yield (code_lineno + (lineno - 1), funcname, messages, + translator_strings + python_translator_comments) + + +def extract(fileobj, keywords, comment_tags, options): + """Extract messages from Mako templates. + + :param fileobj: the file-like object the messages should be extracted from + :param keywords: a list of keywords (i.e. function names) that should be + recognized as translation functions + :param comment_tags: a list of translator tags to search for and include + in the results + :param options: a dictionary of additional options (optional) + :return: an iterator over ``(lineno, funcname, message, comments)`` tuples + :rtype: ``iterator`` + """ + extractor = BabelMakoExtractor(keywords, comment_tags, options) + for message in extractor(fileobj): + yield message diff --git a/server/www/packages/packages-windows/x86/mako/ext/beaker_cache.py b/server/www/packages/packages-windows/x86/mako/ext/beaker_cache.py new file mode 100644 index 0000000..c7c260d --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/ext/beaker_cache.py @@ -0,0 +1,76 @@ +"""Provide a :class:`.CacheImpl` for the Beaker caching system.""" + +from mako import exceptions + +from mako.cache import CacheImpl + +try: + from beaker import cache as beaker_cache +except: + has_beaker = False +else: + has_beaker = True + +_beaker_cache = None + + +class BeakerCacheImpl(CacheImpl): + + """A :class:`.CacheImpl` provided for the Beaker caching system. + + This plugin is used by default, based on the default + value of ``'beaker'`` for the ``cache_impl`` parameter of the + :class:`.Template` or :class:`.TemplateLookup` classes. + + """ + + def __init__(self, cache): + if not has_beaker: + raise exceptions.RuntimeException( + "Can't initialize Beaker plugin; Beaker is not installed.") + global _beaker_cache + if _beaker_cache is None: + if 'manager' in cache.template.cache_args: + _beaker_cache = cache.template.cache_args['manager'] + else: + _beaker_cache = beaker_cache.CacheManager() + super(BeakerCacheImpl, self).__init__(cache) + + def _get_cache(self, **kw): + expiretime = kw.pop('timeout', None) + if 'dir' in kw: + kw['data_dir'] = kw.pop('dir') + elif self.cache.template.module_directory: + kw['data_dir'] = self.cache.template.module_directory + + if 'manager' in kw: + kw.pop('manager') + + if kw.get('type') == 'memcached': + kw['type'] = 'ext:memcached' + + if 'region' in kw: + region = kw.pop('region') + cache = _beaker_cache.get_cache_region(self.cache.id, region, **kw) + else: + cache = _beaker_cache.get_cache(self.cache.id, **kw) + cache_args = {'starttime': self.cache.starttime} + if expiretime: + cache_args['expiretime'] = expiretime + return cache, cache_args + + def get_or_create(self, key, creation_function, **kw): + cache, kw = self._get_cache(**kw) + return cache.get(key, createfunc=creation_function, **kw) + + def put(self, key, value, **kw): + cache, kw = self._get_cache(**kw) + cache.put(key, value, **kw) + + def get(self, key, **kw): + cache, kw = self._get_cache(**kw) + return cache.get(key, **kw) + + def invalidate(self, key, **kw): + cache, kw = self._get_cache(**kw) + cache.remove_value(key, **kw) diff --git a/server/www/packages/packages-windows/x86/mako/ext/extract.py b/server/www/packages/packages-windows/x86/mako/ext/extract.py new file mode 100644 index 0000000..d777ea8 --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/ext/extract.py @@ -0,0 +1,108 @@ +import re +from mako import compat +from mako import lexer +from mako import parsetree 
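+
+# A minimal sketch of how these extractors are typically driven (paths and
+# file names hypothetical).  Babel finds the Mako extractor through its
+# ``mako = mako.ext.babelplugin:extract`` entry point, so a mapping file
+# plus the pybabel CLI is enough:
+#
+#     # babel.cfg
+#     [mako: templates/**.html]
+#     input_encoding = utf-8
+#
+#     $ pybabel extract -F babel.cfg -o messages.pot .
+#
+# MessageExtractor below is the shared base class: it lexes a template into
+# a parse tree, walks the nodes, and hands any embedded Python source to the
+# plugin-specific process_python() hook.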
+ + +class MessageExtractor(object): + + def process_file(self, fileobj): + template_node = lexer.Lexer( + fileobj.read(), + input_encoding=self.config['encoding']).parse() + for extracted in self.extract_nodes(template_node.get_children()): + yield extracted + + def extract_nodes(self, nodes): + translator_comments = [] + in_translator_comments = False + input_encoding = self.config['encoding'] or 'ascii' + comment_tags = list( + filter(None, re.split(r'\s+', self.config['comment-tags']))) + + for node in nodes: + child_nodes = None + if in_translator_comments and \ + isinstance(node, parsetree.Text) and \ + not node.content.strip(): + # Ignore whitespace within translator comments + continue + + if isinstance(node, parsetree.Comment): + value = node.text.strip() + if in_translator_comments: + translator_comments.extend( + self._split_comment(node.lineno, value)) + continue + for comment_tag in comment_tags: + if value.startswith(comment_tag): + in_translator_comments = True + translator_comments.extend( + self._split_comment(node.lineno, value)) + continue + + if isinstance(node, parsetree.DefTag): + code = node.function_decl.code + child_nodes = node.nodes + elif isinstance(node, parsetree.BlockTag): + code = node.body_decl.code + child_nodes = node.nodes + elif isinstance(node, parsetree.CallTag): + code = node.code.code + child_nodes = node.nodes + elif isinstance(node, parsetree.PageTag): + code = node.body_decl.code + elif isinstance(node, parsetree.CallNamespaceTag): + code = node.expression + child_nodes = node.nodes + elif isinstance(node, parsetree.ControlLine): + if node.isend: + in_translator_comments = False + continue + code = node.text + elif isinstance(node, parsetree.Code): + in_translator_comments = False + code = node.code.code + elif isinstance(node, parsetree.Expression): + code = node.code.code + else: + continue + + # Comments don't apply unless they immediately precede the message + if translator_comments and \ + translator_comments[-1][0] < node.lineno - 1: + translator_comments = [] + + translator_strings = [ + comment[1] for comment in translator_comments] + + if isinstance(code, compat.text_type): + code = code.encode(input_encoding, 'backslashreplace') + + used_translator_comments = False + # We add extra newline to work around a pybabel bug + # (see python-babel/babel#274, parse_encoding dies if the first + # input string of the input is non-ascii) + # Also, because we added it, we have to subtract one from + # node.lineno + code = compat.byte_buffer(compat.b('\n') + code) + + for message in self.process_python( + code, node.lineno - 1, translator_strings): + yield message + used_translator_comments = True + + if used_translator_comments: + translator_comments = [] + in_translator_comments = False + + if child_nodes: + for extracted in self.extract_nodes(child_nodes): + yield extracted + + @staticmethod + def _split_comment(lineno, comment): + """Return the multiline comment at lineno split into a list of + comment line numbers and the accompanying comment line""" + return [(lineno + index, line) for index, line in + enumerate(comment.splitlines())] diff --git a/server/www/packages/packages-windows/x86/mako/ext/linguaplugin.py b/server/www/packages/packages-windows/x86/mako/ext/linguaplugin.py new file mode 100644 index 0000000..46b0d6a --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/ext/linguaplugin.py @@ -0,0 +1,43 @@ +import io +from lingua.extractors import Extractor +from lingua.extractors import Message +from lingua.extractors import 
get_extractor +from mako.ext.extract import MessageExtractor +from mako import compat + + +class LinguaMakoExtractor(Extractor, MessageExtractor): + + '''Mako templates''' + extensions = ['.mako'] + default_config = { + 'encoding': 'utf-8', + 'comment-tags': '', + } + + def __call__(self, filename, options, fileobj=None): + self.options = options + self.filename = filename + self.python_extractor = get_extractor('x.py') + if fileobj is None: + fileobj = open(filename, 'rb') + return self.process_file(fileobj) + + def process_python(self, code, code_lineno, translator_strings): + source = code.getvalue().strip() + if source.endswith(compat.b(':')): + if source in (compat.b('try:'), compat.b('else:')) or source.startswith(compat.b('except')): + source = compat.b('') # Ignore try/except and else + elif source.startswith(compat.b('elif')): + source = source[2:] # Replace "elif" with "if" + source += compat.b('pass') + code = io.BytesIO(source) + for msg in self.python_extractor( + self.filename, self.options, code, code_lineno -1): + if translator_strings: + msg = Message(msg.msgctxt, msg.msgid, msg.msgid_plural, + msg.flags, + compat.u(' ').join( + translator_strings + [msg.comment]), + msg.tcomment, msg.location) + yield msg diff --git a/server/www/packages/packages-windows/x86/mako/ext/preprocessors.py b/server/www/packages/packages-windows/x86/mako/ext/preprocessors.py new file mode 100644 index 0000000..9b700d1 --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/ext/preprocessors.py @@ -0,0 +1,20 @@ +# ext/preprocessors.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""preprocessing functions, used with the 'preprocessor' +argument on Template, TemplateLookup""" + +import re + + +def convert_comments(text): + """preprocess old style comments. 
+ + example: + + from mako.ext.preprocessors import convert_comments + t = Template(..., preprocessor=convert_comments)""" + return re.sub(r'(?<=\n)\s*#[^#]', "##", text) diff --git a/server/www/packages/packages-windows/x86/mako/ext/pygmentplugin.py b/server/www/packages/packages-windows/x86/mako/ext/pygmentplugin.py new file mode 100644 index 0000000..4057caa --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/ext/pygmentplugin.py @@ -0,0 +1,127 @@ +# ext/pygmentplugin.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from pygments.lexers.web import \ + HtmlLexer, XmlLexer, JavascriptLexer, CssLexer +from pygments.lexers.agile import PythonLexer, Python3Lexer +from pygments.lexer import DelegatingLexer, RegexLexer, bygroups, \ + include, using +from pygments.token import \ + Text, Comment, Operator, Keyword, Name, String, Other +from pygments.formatters.html import HtmlFormatter +from pygments import highlight +from mako import compat + + +class MakoLexer(RegexLexer): + name = 'Mako' + aliases = ['mako'] + filenames = ['*.mao'] + + tokens = { + 'root': [ + (r'(\s*)(\%)(\s*end(?:\w+))(\n|\Z)', + bygroups(Text, Comment.Preproc, Keyword, Other)), + (r'(\s*)(\%(?!%))([^\n]*)(\n|\Z)', + bygroups(Text, Comment.Preproc, using(PythonLexer), Other)), + (r'(\s*)(##[^\n]*)(\n|\Z)', + bygroups(Text, Comment.Preproc, Other)), + (r'''(?s)<%doc>.*?''', Comment.Preproc), + (r'(<%)([\w\.\:]+)', + bygroups(Comment.Preproc, Name.Builtin), 'tag'), + (r'()', + bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)), + (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'), + (r'(<%(?:!?))(.*?)(%>)(?s)', + bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), + (r'(\$\{)(.*?)(\})', + bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), + (r'''(?sx) + (.+?) 
# anything, followed by: + (?: + (?<=\n)(?=%(?!%)|\#\#) | # an eval or comment line + (?=\#\*) | # multiline comment + (?=', Comment.Preproc, '#pop'), + (r'\s+', Text), + ], + 'attr': [ + ('".*?"', String, '#pop'), + ("'.*?'", String, '#pop'), + (r'[^\s>]+', String, '#pop'), + ], + } + + +class MakoHtmlLexer(DelegatingLexer): + name = 'HTML+Mako' + aliases = ['html+mako'] + + def __init__(self, **options): + super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer, + **options) + + +class MakoXmlLexer(DelegatingLexer): + name = 'XML+Mako' + aliases = ['xml+mako'] + + def __init__(self, **options): + super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer, + **options) + + +class MakoJavascriptLexer(DelegatingLexer): + name = 'JavaScript+Mako' + aliases = ['js+mako', 'javascript+mako'] + + def __init__(self, **options): + super(MakoJavascriptLexer, self).__init__(JavascriptLexer, + MakoLexer, **options) + + +class MakoCssLexer(DelegatingLexer): + name = 'CSS+Mako' + aliases = ['css+mako'] + + def __init__(self, **options): + super(MakoCssLexer, self).__init__(CssLexer, MakoLexer, + **options) + + +pygments_html_formatter = HtmlFormatter(cssclass='syntax-highlighted', + linenos=True) + + +def syntax_highlight(filename='', language=None): + mako_lexer = MakoLexer() + if compat.py3k: + python_lexer = Python3Lexer() + else: + python_lexer = PythonLexer() + if filename.startswith('memory:') or language == 'mako': + return lambda string: highlight(string, mako_lexer, + pygments_html_formatter) + return lambda string: highlight(string, python_lexer, + pygments_html_formatter) diff --git a/server/www/packages/packages-windows/x86/mako/ext/turbogears.py b/server/www/packages/packages-windows/x86/mako/ext/turbogears.py new file mode 100644 index 0000000..eaa2d78 --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/ext/turbogears.py @@ -0,0 +1,58 @@ +# ext/turbogears.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from mako import compat +from mako.lookup import TemplateLookup +from mako.template import Template + + +class TGPlugin(object): + + """TurboGears compatible Template Plugin.""" + + def __init__(self, extra_vars_func=None, options=None, extension='mak'): + self.extra_vars_func = extra_vars_func + self.extension = extension + if not options: + options = {} + + # Pull the options out and initialize the lookup + lookup_options = {} + for k, v in options.items(): + if k.startswith('mako.'): + lookup_options[k[5:]] = v + elif k in ['directories', 'filesystem_checks', 'module_directory']: + lookup_options[k] = v + self.lookup = TemplateLookup(**lookup_options) + + self.tmpl_options = {} + # transfer lookup args to template args, based on those available + # in getargspec + for kw in compat.inspect_getargspec(Template.__init__)[0]: + if kw in lookup_options: + self.tmpl_options[kw] = lookup_options[kw] + + def load_template(self, templatename, template_string=None): + """Loads a template from a file or a string""" + if template_string is not None: + return Template(template_string, **self.tmpl_options) + # Translate TG dot notation to normal / template path + if '/' not in templatename: + templatename = '/' + templatename.replace('.', '/') + '.' 
+\
+                self.extension
+
+        # Lookup template
+        return self.lookup.get_template(templatename)
+
+    def render(self, info, format="html", fragment=False, template=None):
+        if isinstance(template, compat.string_types):
+            template = self.load_template(template)
+
+        # Load extra vars func if provided
+        if self.extra_vars_func:
+            info.update(self.extra_vars_func())
+
+        return template.render(**info)
diff --git a/server/www/packages/packages-windows/x86/mako/filters.py b/server/www/packages/packages-windows/x86/mako/filters.py
new file mode 100644
index 0000000..c082690
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/mako/filters.py
@@ -0,0 +1,209 @@
+# mako/filters.py
+# Copyright (C) 2006-2016 the Mako authors and contributors
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+
+import re
+import codecs
+
+from mako.compat import quote_plus, unquote_plus, codepoint2name, \
+    name2codepoint
+
+from mako import compat
+
+xml_escapes = {
+    '&': '&amp;',
+    '>': '&gt;',
+    '<': '&lt;',
+    '"': '&#34;',   # also &quot; in html-only
+    "'": '&#39;'    # also &apos; in html-only
+}
+
+# XXX: &quot; is valid in HTML and XML
+#      &apos; is not valid HTML, but is valid XML
+
+
+def legacy_html_escape(s):
+    """legacy HTML escape for non-unicode mode."""
+    s = s.replace("&", "&amp;")
+    s = s.replace(">", "&gt;")
+    s = s.replace("<", "&lt;")
+    s = s.replace('"', "&#34;")
+    s = s.replace("'", "&#39;")
+    return s
+
+
+try:
+    import markupsafe
+    html_escape = markupsafe.escape
+except ImportError:
+    html_escape = legacy_html_escape
+
+
+def xml_escape(string):
+    return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)
+
+
+def url_escape(string):
+    # convert into a list of octets
+    string = string.encode("utf8")
+    return quote_plus(string)
+
+
+def legacy_url_escape(string):
+    # convert into a list of octets
+    return quote_plus(string)
+
+
+def url_unescape(string):
+    text = unquote_plus(string)
+    if not is_ascii_str(text):
+        text = text.decode("utf8")
+    return text
+
+
+def trim(string):
+    return string.strip()
+
+
+class Decode(object):
+
+    def __getattr__(self, key):
+        def decode(x):
+            if isinstance(x, compat.text_type):
+                return x
+            elif not isinstance(x, compat.binary_type):
+                return decode(str(x))
+            else:
+                return compat.text_type(x, encoding=key)
+        return decode
+decode = Decode()
+
+
+_ASCII_re = re.compile(r'\A[\x00-\x7f]*\Z')
+
+
+def is_ascii_str(text):
+    return isinstance(text, str) and _ASCII_re.match(text)
+
+################################################################
+
+
+class XMLEntityEscaper(object):
+
+    def __init__(self, codepoint2name, name2codepoint):
+        self.codepoint2entity = dict([(c, compat.text_type('&%s;' % n))
+                                      for c, n in codepoint2name.items()])
+        self.name2codepoint = name2codepoint
+
+    def escape_entities(self, text):
+        """Replace characters with their character entity references.
+
+        Only characters corresponding to a named entity are replaced.
+        """
+        return compat.text_type(text).translate(self.codepoint2entity)
+
+    def __escape(self, m):
+        codepoint = ord(m.group())
+        try:
+            return self.codepoint2entity[codepoint]
+        except (KeyError, IndexError):
+            return '&#x%X;' % codepoint
+
+    __escapable = re.compile(r'["&<>]|[^\x00-\x7f]')
+
+    def escape(self, text):
+        """Replace characters with their character references.
+
+        Replace characters by their named entity references.
+        Non-ASCII characters, if they do not have a named entity reference,
+        are replaced by numerical character references.
+
+        The return value is guaranteed to be ASCII.
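+
+        A rough sketch of the behaviour, assuming the module-level
+        ``codepoint2name``/``name2codepoint`` tables are in use:
+
+        .. sourcecode:: python
+
+            escaper = XMLEntityEscaper(codepoint2name, name2codepoint)
+            escaper.escape(u'x < y & z')   # -> ascii bytes 'x &lt; y &amp; z'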
+ """ + return self.__escapable.sub(self.__escape, compat.text_type(text) + ).encode('ascii') + + # XXX: This regexp will not match all valid XML entity names__. + # (It punts on details involving involving CombiningChars and Extenders.) + # + # .. __: http://www.w3.org/TR/2000/REC-xml-20001006#NT-EntityRef + __characterrefs = re.compile(r'''& (?: + \#(\d+) + | \#x([\da-f]+) + | ( (?!\d) [:\w] [-.:\w]+ ) + ) ;''', + re.X | re.UNICODE) + + def __unescape(self, m): + dval, hval, name = m.groups() + if dval: + codepoint = int(dval) + elif hval: + codepoint = int(hval, 16) + else: + codepoint = self.name2codepoint.get(name, 0xfffd) + # U+FFFD = "REPLACEMENT CHARACTER" + if codepoint < 128: + return chr(codepoint) + return chr(codepoint) + + def unescape(self, text): + """Unescape character references. + + All character references (both entity references and numerical + character references) are unescaped. + """ + return self.__characterrefs.sub(self.__unescape, text) + + +_html_entities_escaper = XMLEntityEscaper(codepoint2name, name2codepoint) + +html_entities_escape = _html_entities_escaper.escape_entities +html_entities_unescape = _html_entities_escaper.unescape + + +def htmlentityreplace_errors(ex): + """An encoding error handler. + + This python `codecs`_ error handler replaces unencodable + characters with HTML entities, or, if no HTML entity exists for + the character, XML character references. + + >>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace') + 'The cost was €12.' + """ + if isinstance(ex, UnicodeEncodeError): + # Handle encoding errors + bad_text = ex.object[ex.start:ex.end] + text = _html_entities_escaper.escape(bad_text) + return (compat.text_type(text), ex.end) + raise ex + +codecs.register_error('htmlentityreplace', htmlentityreplace_errors) + + +# TODO: options to make this dynamic per-compilation will be added in a later +# release +DEFAULT_ESCAPES = { + 'x': 'filters.xml_escape', + 'h': 'filters.html_escape', + 'u': 'filters.url_escape', + 'trim': 'filters.trim', + 'entity': 'filters.html_entities_escape', + 'unicode': 'unicode', + 'decode': 'decode', + 'str': 'str', + 'n': 'n' +} + +if compat.py3k: + DEFAULT_ESCAPES.update({ + 'unicode': 'str' + }) + +NON_UNICODE_ESCAPES = DEFAULT_ESCAPES.copy() +NON_UNICODE_ESCAPES['h'] = 'filters.legacy_html_escape' +NON_UNICODE_ESCAPES['u'] = 'filters.legacy_url_escape' diff --git a/server/www/packages/packages-windows/x86/mako/lexer.py b/server/www/packages/packages-windows/x86/mako/lexer.py new file mode 100644 index 0000000..cf4187f --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/lexer.py @@ -0,0 +1,449 @@ +# mako/lexer.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""provides the Lexer class for parsing template strings into parse trees.""" + +import re +import codecs +from mako import parsetree, exceptions, compat +from mako.pygen import adjust_whitespace + +_regexp_cache = {} + + +class Lexer(object): + + def __init__(self, text, filename=None, + disable_unicode=False, + input_encoding=None, preprocessor=None): + self.text = text + self.filename = filename + self.template = parsetree.TemplateNode(self.filename) + self.matched_lineno = 1 + self.matched_charpos = 0 + self.lineno = 1 + self.match_position = 0 + self.tag = [] + self.control_line = [] + self.ternary_stack = [] + self.disable_unicode = disable_unicode + self.encoding = input_encoding + + 
if compat.py3k and disable_unicode: + raise exceptions.UnsupportedError( + "Mako for Python 3 does not " + "support disabling Unicode") + + if preprocessor is None: + self.preprocessor = [] + elif not hasattr(preprocessor, '__iter__'): + self.preprocessor = [preprocessor] + else: + self.preprocessor = preprocessor + + @property + def exception_kwargs(self): + return {'source': self.text, + 'lineno': self.matched_lineno, + 'pos': self.matched_charpos, + 'filename': self.filename} + + def match(self, regexp, flags=None): + """compile the given regexp, cache the reg, and call match_reg().""" + + try: + reg = _regexp_cache[(regexp, flags)] + except KeyError: + if flags: + reg = re.compile(regexp, flags) + else: + reg = re.compile(regexp) + _regexp_cache[(regexp, flags)] = reg + + return self.match_reg(reg) + + def match_reg(self, reg): + """match the given regular expression object to the current text + position. + + if a match occurs, update the current text and line position. + + """ + + mp = self.match_position + + match = reg.match(self.text, self.match_position) + if match: + (start, end) = match.span() + if end == start: + self.match_position = end + 1 + else: + self.match_position = end + self.matched_lineno = self.lineno + lines = re.findall(r"\n", self.text[mp:self.match_position]) + cp = mp - 1 + while (cp >= 0 and cp < self.textlength and self.text[cp] != '\n'): + cp -= 1 + self.matched_charpos = mp - cp + self.lineno += len(lines) + # print "MATCHED:", match.group(0), "LINE START:", + # self.matched_lineno, "LINE END:", self.lineno + # print "MATCH:", regexp, "\n", self.text[mp : mp + 15], \ + # (match and "TRUE" or "FALSE") + return match + + def parse_until_text(self, watch_nesting, *text): + startpos = self.match_position + text_re = r'|'.join(text) + brace_level = 0 + paren_level = 0 + bracket_level = 0 + while True: + match = self.match(r'#.*\n') + if match: + continue + match = self.match(r'(\"\"\"|\'\'\'|\"|\')[^\\]*?(\\.[^\\]*?)*\1', + re.S) + if match: + continue + match = self.match(r'(%s)' % text_re) + if match and not (watch_nesting + and (brace_level > 0 or paren_level > 0 + or bracket_level > 0)): + return \ + self.text[startpos: + self.match_position - len(match.group(1))],\ + match.group(1) + elif not match: + match = self.match(r"(.*?)(?=\"|\'|#|%s)" % text_re, re.S) + if match: + brace_level += match.group(1).count('{') + brace_level -= match.group(1).count('}') + paren_level += match.group(1).count('(') + paren_level -= match.group(1).count(')') + bracket_level += match.group(1).count('[') + bracket_level -= match.group(1).count(']') + continue + raise exceptions.SyntaxException( + "Expected: %s" % + ','.join(text), + **self.exception_kwargs) + + def append_node(self, nodecls, *args, **kwargs): + kwargs.setdefault('source', self.text) + kwargs.setdefault('lineno', self.matched_lineno) + kwargs.setdefault('pos', self.matched_charpos) + kwargs['filename'] = self.filename + node = nodecls(*args, **kwargs) + if len(self.tag): + self.tag[-1].nodes.append(node) + else: + self.template.nodes.append(node) + # build a set of child nodes for the control line + # (used for loop variable detection) + # also build a set of child nodes on ternary control lines + # (used for determining if a pass needs to be auto-inserted + if self.control_line: + control_frame = self.control_line[-1] + control_frame.nodes.append(node) + if not (isinstance(node, parsetree.ControlLine) and + control_frame.is_ternary(node.keyword)): + if self.ternary_stack and self.ternary_stack[-1]: + 
self.ternary_stack[-1][-1].nodes.append(node) + if isinstance(node, parsetree.Tag): + if len(self.tag): + node.parent = self.tag[-1] + self.tag.append(node) + elif isinstance(node, parsetree.ControlLine): + if node.isend: + self.control_line.pop() + self.ternary_stack.pop() + elif node.is_primary: + self.control_line.append(node) + self.ternary_stack.append([]) + elif self.control_line and \ + self.control_line[-1].is_ternary(node.keyword): + self.ternary_stack[-1].append(node) + elif self.control_line and \ + not self.control_line[-1].is_ternary(node.keyword): + raise exceptions.SyntaxException( + "Keyword '%s' not a legal ternary for keyword '%s'" % + (node.keyword, self.control_line[-1].keyword), + **self.exception_kwargs) + + _coding_re = re.compile(r'#.*coding[:=]\s*([-\w.]+).*\r?\n') + + def decode_raw_stream(self, text, decode_raw, known_encoding, filename): + """given string/unicode or bytes/string, determine encoding + from magic encoding comment, return body as unicode + or raw if decode_raw=False + + """ + if isinstance(text, compat.text_type): + m = self._coding_re.match(text) + encoding = m and m.group(1) or known_encoding or 'ascii' + return encoding, text + + if text.startswith(codecs.BOM_UTF8): + text = text[len(codecs.BOM_UTF8):] + parsed_encoding = 'utf-8' + m = self._coding_re.match(text.decode('utf-8', 'ignore')) + if m is not None and m.group(1) != 'utf-8': + raise exceptions.CompileException( + "Found utf-8 BOM in file, with conflicting " + "magic encoding comment of '%s'" % m.group(1), + text.decode('utf-8', 'ignore'), + 0, 0, filename) + else: + m = self._coding_re.match(text.decode('utf-8', 'ignore')) + if m: + parsed_encoding = m.group(1) + else: + parsed_encoding = known_encoding or 'ascii' + + if decode_raw: + try: + text = text.decode(parsed_encoding) + except UnicodeDecodeError: + raise exceptions.CompileException( + "Unicode decode operation of encoding '%s' failed" % + parsed_encoding, + text.decode('utf-8', 'ignore'), + 0, 0, filename) + + return parsed_encoding, text + + def parse(self): + self.encoding, self.text = self.decode_raw_stream( + self.text, + not self.disable_unicode, + self.encoding, + self.filename) + + for preproc in self.preprocessor: + self.text = preproc(self.text) + + # push the match marker past the + # encoding comment. 
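+        # (decode_raw_stream only detects the magic encoding comment;
+        # advancing match_position past it here keeps the comment itself
+        # out of the template's parsed text nodes.)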
+        self.match_reg(self._coding_re)
+
+        self.textlength = len(self.text)
+
+        while (True):
+            if self.match_position > self.textlength:
+                break
+
+            if self.match_end():
+                break
+            if self.match_expression():
+                continue
+            if self.match_control_line():
+                continue
+            if self.match_comment():
+                continue
+            if self.match_tag_start():
+                continue
+            if self.match_tag_end():
+                continue
+            if self.match_python_block():
+                continue
+            if self.match_text():
+                continue
+
+            if self.match_position > self.textlength:
+                break
+            raise exceptions.CompileException("assertion failed")
+
+        if len(self.tag):
+            raise exceptions.SyntaxException("Unclosed tag: <%%%s>" %
+                                             self.tag[-1].keyword,
+                                             **self.exception_kwargs)
+        if len(self.control_line):
+            raise exceptions.SyntaxException(
+                "Unterminated control keyword: '%s'" %
+                self.control_line[-1].keyword,
+                self.text,
+                self.control_line[-1].lineno,
+                self.control_line[-1].pos, self.filename)
+        return self.template
+
+    def match_tag_start(self):
+        match = self.match(r'''
+            \<%     # opening tag
+
+            ([\w\.\:]+)   # keyword
+
+            ((?:\s+\w+|\s*=\s*|".*?"|'.*?')*)  # attrname, = \
+                                               #        sign, string expression
+
+            \s*     # more whitespace
+
+            (/)?>   # closing
+
+            ''',
+
+            re.I | re.S | re.X)
+
+        if match:
+            keyword, attr, isend = match.groups()
+            self.keyword = keyword
+            attributes = {}
+            if attr:
+                for att in re.findall(
+                        r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
+                    key, val1, val2 = att
+                    text = val1 or val2
+                    text = text.replace('\r\n', '\n')
+                    attributes[key] = text
+            self.append_node(parsetree.Tag, keyword, attributes)
+            if isend:
+                self.tag.pop()
+            else:
+                if keyword == 'text':
+                    match = self.match(r'(.*?)(?=\</%text>)', re.S)
+                    if not match:
+                        raise exceptions.SyntaxException(
+                            "Unclosed tag: <%%%s>" %
+                            self.tag[-1].keyword,
+                            **self.exception_kwargs)
+                    self.append_node(parsetree.Text, match.group(1))
+                    return self.match_tag_end()
+            return True
+        else:
+            return False
+
+    def match_tag_end(self):
+        match = self.match(r'\</%[\t ]*(.+?)[\t ]*>')
+        if match:
+            if not len(self.tag):
+                raise exceptions.SyntaxException(
+                    "Closing tag without opening tag: </%%%s>" %
+                    match.group(1),
+                    **self.exception_kwargs)
+            elif self.tag[-1].keyword != match.group(1):
+                raise exceptions.SyntaxException(
+                    "Closing tag </%%%s> does not match tag: <%%%s>" %
+                    (match.group(1), self.tag[-1].keyword),
+                    **self.exception_kwargs)
+            self.tag.pop()
+            return True
+        else:
+            return False
+
+    def match_end(self):
+        match = self.match(r'\Z', re.S)
+        if match:
+            string = match.group()
+            if string:
+                return string
+            else:
+                return True
+        else:
+            return False
+
+    def match_text(self):
+        match = self.match(r"""
+                (.*?)         # anything, followed by:
+                (
+                 (?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based
+                                             # comment preceded by a
+                                             # consumed newline and whitespace
+                 |
+                 (?=\${)      # an expression
+                 |
+                 (?=</?[%&])  # a substitution or block or call start or end
+                              # - don't consume
+                 |
+                 (\\\r?\n)    # an escaped newline - throw away
+                 |
+                 \Z           # end of string
+                )""", re.X | re.S)
+
+        if match:
+            text = match.group(1)
+            if text:
+                self.append_node(parsetree.Text, text)
+            return True
+        else:
+            return False
+
+    def match_python_block(self):
+        match = self.match(r"<%(!)?")
+        if match:
+            line, pos = self.matched_lineno, self.matched_charpos
+            text, end = self.parse_until_text(False, r'%>')
+            # the trailing newline helps
+            # compiler.parse() not complain about indentation
+            text = adjust_whitespace(text) + "\n"
+            self.append_node(
+                parsetree.Code,
+                text,
+                match.group(1) == '!', lineno=line, pos=pos)
+            return True
+        else:
+            return False
+
+    def match_expression(self):
+        match = self.match(r"\${")
+        if match:
+            line, pos = self.matched_lineno, self.matched_charpos
+            text, end = self.parse_until_text(True, r'\|', r'}')
+            if end == '|':
+                escapes, end = self.parse_until_text(True, r'}')
+            else:
+                escapes = ""
+            text = text.replace('\r\n', '\n')
+            self.append_node(
+                parsetree.Expression,
+                text, escapes.strip(),
+                lineno=line, pos=pos)
+            return True
+        else:
+            return False
+
+    def match_control_line(self):
+        match = self.match(
+            r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)"
+            r"(?:\r?\n|\Z)", re.M)
+        if match:
+            operator = match.group(1)
+            text = match.group(2)
+            if operator == '%':
+                m2 = re.match(r'(end)?(\w+)\s*(.*)', text)
+                if not m2:
+                    raise exceptions.SyntaxException(
+                        "Invalid control line: '%s'" %
+                        text,
+                        **self.exception_kwargs)
+                isend, keyword = m2.group(1, 2)
+                isend = (isend is not None)
+
+                if isend:
+                    if not len(self.control_line):
+                        raise exceptions.SyntaxException(
+                            "No starting keyword '%s' for '%s'" %
+                            (keyword, text),
+                            **self.exception_kwargs)
+                    elif self.control_line[-1].keyword != keyword:
+                        raise exceptions.SyntaxException(
+                            "Keyword '%s' doesn't match keyword '%s'" %
+                            (text, self.control_line[-1].keyword),
+                            **self.exception_kwargs)
+                self.append_node(parsetree.ControlLine, keyword, isend, text)
+            else:
+                self.append_node(parsetree.Comment, text)
+            return True
+        else:
+            return False
+
+    def match_comment(self):
+        """matches the multiline version of a comment"""
+        match = self.match(r"<%doc>(.*?)</%doc>", re.S)
+        if match:
+            self.append_node(parsetree.Comment, match.group(1))
+            return True
+        else:
+            return False
diff --git a/server/www/packages/packages-windows/x86/mako/lookup.py b/server/www/packages/packages-windows/x86/mako/lookup.py
new file mode 100644
index 0000000..0d3f304
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/mako/lookup.py
@@ -0,0 +1,369 @@
+# mako/lookup.py
+# Copyright (C) 2006-2016 the Mako authors and contributors
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import os
+import stat
+import posixpath
+import re
+from mako import exceptions, util
+from mako.template import Template
+
+try:
+    import threading
+except:
+    import dummy_threading as threading
+
+
+class TemplateCollection(object):
+
+    """Represent a collection of :class:`.Template` objects,
+    identifiable via URI.
+
+    A :class:`.TemplateCollection` is linked to the usage of
+    all template tags that address other templates, such
+    as ``<%include>``, ``<%namespace>``, and ``<%inherit>``.
+    The ``file`` attribute of each of those tags refers
+    to a string URI that is passed to that :class:`.Template`
+    object's :class:`.TemplateCollection` for resolution.
+
+    :class:`.TemplateCollection` is an abstract class,
+    with the usual default implementation being :class:`.TemplateLookup`.
+
+    """
+
+    def has_template(self, uri):
+        """Return ``True`` if this :class:`.TemplateLookup` is
+        capable of returning a :class:`.Template` object for the
+        given ``uri``.
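+
+        A short usage sketch (directory path hypothetical):
+
+        .. sourcecode:: python
+
+            lookup = TemplateLookup(directories=["/path/to/templates"])
+            if lookup.has_template("/index.html"):
+                template = lookup.get_template("/index.html")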
+ + :param uri: String URI of the template to be resolved. + + """ + try: + self.get_template(uri) + return True + except exceptions.TemplateLookupException: + return False + + def get_template(self, uri, relativeto=None): + """Return a :class:`.Template` object corresponding to the given + ``uri``. + + The default implementation raises + :class:`.NotImplementedError`. Implementations should + raise :class:`.TemplateLookupException` if the given ``uri`` + cannot be resolved. + + :param uri: String URI of the template to be resolved. + :param relativeto: if present, the given ``uri`` is assumed to + be relative to this URI. + + """ + raise NotImplementedError() + + def filename_to_uri(self, uri, filename): + """Convert the given ``filename`` to a URI relative to + this :class:`.TemplateCollection`.""" + + return uri + + def adjust_uri(self, uri, filename): + """Adjust the given ``uri`` based on the calling ``filename``. + + When this method is called from the runtime, the + ``filename`` parameter is taken directly to the ``filename`` + attribute of the calling template. Therefore a custom + :class:`.TemplateCollection` subclass can place any string + identifier desired in the ``filename`` parameter of the + :class:`.Template` objects it constructs and have them come back + here. + + """ + return uri + + +class TemplateLookup(TemplateCollection): + + """Represent a collection of templates that locates template source files + from the local filesystem. + + The primary argument is the ``directories`` argument, the list of + directories to search: + + .. sourcecode:: python + + lookup = TemplateLookup(["/path/to/templates"]) + some_template = lookup.get_template("/index.html") + + The :class:`.TemplateLookup` can also be given :class:`.Template` objects + programatically using :meth:`.put_string` or :meth:`.put_template`: + + .. sourcecode:: python + + lookup = TemplateLookup() + lookup.put_string("base.html", ''' + ${self.next()} + ''') + lookup.put_string("hello.html", ''' + <%include file='base.html'/> + + Hello, world ! + ''') + + + :param directories: A list of directory names which will be + searched for a particular template URI. The URI is appended + to each directory and the filesystem checked. + + :param collection_size: Approximate size of the collection used + to store templates. If left at its default of ``-1``, the size + is unbounded, and a plain Python dictionary is used to + relate URI strings to :class:`.Template` instances. + Otherwise, a least-recently-used cache object is used which + will maintain the size of the collection approximately to + the number given. + + :param filesystem_checks: When at its default value of ``True``, + each call to :meth:`.TemplateLookup.get_template()` will + compare the filesystem last modified time to the time in + which an existing :class:`.Template` object was created. + This allows the :class:`.TemplateLookup` to regenerate a + new :class:`.Template` whenever the original source has + been updated. Set this to ``False`` for a very minor + performance increase. + + :param modulename_callable: A callable which, when present, + is passed the path of the source file as well as the + requested URI, and then returns the full path of the + generated Python module file. This is used to inject + alternate schemes for Python module location. If left at + its default of ``None``, the built in system of generation + based on ``module_directory`` plus ``uri`` is used. 
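+
+      For example, a minimal sketch of such a callable (module target
+      directory hypothetical):
+
+      .. sourcecode:: python
+
+          def modulename(filename, uri):
+              return '/tmp/mako_modules' + uri + '.py'
+
+          lookup = TemplateLookup(["/path/to/templates"],
+                                  modulename_callable=modulename)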
+ + All other keyword parameters available for + :class:`.Template` are mirrored here. When new + :class:`.Template` objects are created, the keywords + established with this :class:`.TemplateLookup` are passed on + to each new :class:`.Template`. + + """ + + def __init__(self, + directories=None, + module_directory=None, + filesystem_checks=True, + collection_size=-1, + format_exceptions=False, + error_handler=None, + disable_unicode=False, + bytestring_passthrough=False, + output_encoding=None, + encoding_errors='strict', + + cache_args=None, + cache_impl='beaker', + cache_enabled=True, + cache_type=None, + cache_dir=None, + cache_url=None, + + modulename_callable=None, + module_writer=None, + default_filters=None, + buffer_filters=(), + strict_undefined=False, + imports=None, + future_imports=None, + enable_loop=True, + input_encoding=None, + preprocessor=None, + lexer_cls=None, + include_error_handler=None): + + self.directories = [posixpath.normpath(d) for d in + util.to_list(directories, ()) + ] + self.module_directory = module_directory + self.modulename_callable = modulename_callable + self.filesystem_checks = filesystem_checks + self.collection_size = collection_size + + if cache_args is None: + cache_args = {} + # transfer deprecated cache_* args + if cache_dir: + cache_args.setdefault('dir', cache_dir) + if cache_url: + cache_args.setdefault('url', cache_url) + if cache_type: + cache_args.setdefault('type', cache_type) + + self.template_args = { + 'format_exceptions': format_exceptions, + 'error_handler': error_handler, + 'include_error_handler': include_error_handler, + 'disable_unicode': disable_unicode, + 'bytestring_passthrough': bytestring_passthrough, + 'output_encoding': output_encoding, + 'cache_impl': cache_impl, + 'encoding_errors': encoding_errors, + 'input_encoding': input_encoding, + 'module_directory': module_directory, + 'module_writer': module_writer, + 'cache_args': cache_args, + 'cache_enabled': cache_enabled, + 'default_filters': default_filters, + 'buffer_filters': buffer_filters, + 'strict_undefined': strict_undefined, + 'imports': imports, + 'future_imports': future_imports, + 'enable_loop': enable_loop, + 'preprocessor': preprocessor, + 'lexer_cls': lexer_cls + } + + if collection_size == -1: + self._collection = {} + self._uri_cache = {} + else: + self._collection = util.LRUCache(collection_size) + self._uri_cache = util.LRUCache(collection_size) + self._mutex = threading.Lock() + + def get_template(self, uri): + """Return a :class:`.Template` object corresponding to the given + ``uri``. + + .. note:: The ``relativeto`` argument is not supported here at + the moment. + + """ + + try: + if self.filesystem_checks: + return self._check(uri, self._collection[uri]) + else: + return self._collection[uri] + except KeyError: + u = re.sub(r'^\/+', '', uri) + for dir in self.directories: + # make sure the path seperators are posix - os.altsep is empty + # on POSIX and cannot be used. 
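+                # (on Windows os.path.sep is "\", so directories are
+                # normalized to "/" before posixpath.join combines them
+                # with the URI below.)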
+ dir = dir.replace(os.path.sep, posixpath.sep) + srcfile = posixpath.normpath(posixpath.join(dir, u)) + if os.path.isfile(srcfile): + return self._load(srcfile, uri) + else: + raise exceptions.TopLevelLookupException( + "Cant locate template for uri %r" % uri) + + def adjust_uri(self, uri, relativeto): + """Adjust the given ``uri`` based on the given relative URI.""" + + key = (uri, relativeto) + if key in self._uri_cache: + return self._uri_cache[key] + + if uri[0] != '/': + if relativeto is not None: + v = self._uri_cache[key] = posixpath.join( + posixpath.dirname(relativeto), uri) + else: + v = self._uri_cache[key] = '/' + uri + else: + v = self._uri_cache[key] = uri + return v + + def filename_to_uri(self, filename): + """Convert the given ``filename`` to a URI relative to + this :class:`.TemplateCollection`.""" + + try: + return self._uri_cache[filename] + except KeyError: + value = self._relativeize(filename) + self._uri_cache[filename] = value + return value + + def _relativeize(self, filename): + """Return the portion of a filename that is 'relative' + to the directories in this lookup. + + """ + + filename = posixpath.normpath(filename) + for dir in self.directories: + if filename[0:len(dir)] == dir: + return filename[len(dir):] + else: + return None + + def _load(self, filename, uri): + self._mutex.acquire() + try: + try: + # try returning from collection one + # more time in case concurrent thread already loaded + return self._collection[uri] + except KeyError: + pass + try: + if self.modulename_callable is not None: + module_filename = self.modulename_callable(filename, uri) + else: + module_filename = None + self._collection[uri] = template = Template( + uri=uri, + filename=posixpath.normpath(filename), + lookup=self, + module_filename=module_filename, + **self.template_args) + return template + except: + # if compilation fails etc, ensure + # template is removed from collection, + # re-raise + self._collection.pop(uri, None) + raise + finally: + self._mutex.release() + + def _check(self, uri, template): + if template.filename is None: + return template + + try: + template_stat = os.stat(template.filename) + if template.module._modified_time < \ + template_stat[stat.ST_MTIME]: + self._collection.pop(uri, None) + return self._load(template.filename, uri) + else: + return template + except OSError: + self._collection.pop(uri, None) + raise exceptions.TemplateLookupException( + "Cant locate template for uri %r" % uri) + + def put_string(self, uri, text): + """Place a new :class:`.Template` object into this + :class:`.TemplateLookup`, based on the given string of + ``text``. + + """ + self._collection[uri] = Template( + text, + lookup=self, + uri=uri, + **self.template_args) + + def put_template(self, uri, template): + """Place a new :class:`.Template` object into this + :class:`.TemplateLookup`, based on the given + :class:`.Template` object. 
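+
+        A brief sketch pairing it with :meth:`.put_string` (URIs
+        hypothetical):
+
+        .. sourcecode:: python
+
+            lookup.put_string("/base.html", "${self.body()}")
+            lookup.put_template("/alias.html",
+                                lookup.get_template("/base.html"))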
+ + """ + self._collection[uri] = template diff --git a/server/www/packages/packages-windows/x86/mako/parsetree.py b/server/www/packages/packages-windows/x86/mako/parsetree.py new file mode 100644 index 0000000..e129916 --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/parsetree.py @@ -0,0 +1,616 @@ +# mako/parsetree.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""defines the parse tree components for Mako templates.""" + +from mako import exceptions, ast, util, filters, compat +import re + + +class Node(object): + + """base class for a Node in the parse tree.""" + + def __init__(self, source, lineno, pos, filename): + self.source = source + self.lineno = lineno + self.pos = pos + self.filename = filename + + @property + def exception_kwargs(self): + return {'source': self.source, 'lineno': self.lineno, + 'pos': self.pos, 'filename': self.filename} + + def get_children(self): + return [] + + def accept_visitor(self, visitor): + def traverse(node): + for n in node.get_children(): + n.accept_visitor(visitor) + + method = getattr(visitor, "visit" + self.__class__.__name__, traverse) + method(self) + + +class TemplateNode(Node): + + """a 'container' node that stores the overall collection of nodes.""" + + def __init__(self, filename): + super(TemplateNode, self).__init__('', 0, 0, filename) + self.nodes = [] + self.page_attributes = {} + + def get_children(self): + return self.nodes + + def __repr__(self): + return "TemplateNode(%s, %r)" % ( + util.sorted_dict_repr(self.page_attributes), + self.nodes) + + +class ControlLine(Node): + + """defines a control line, a line-oriented python line or end tag. + + e.g.:: + + % if foo: + (markup) + % endif + + """ + + has_loop_context = False + + def __init__(self, keyword, isend, text, **kwargs): + super(ControlLine, self).__init__(**kwargs) + self.text = text + self.keyword = keyword + self.isend = isend + self.is_primary = keyword in ['for', 'if', 'while', 'try', 'with'] + self.nodes = [] + if self.isend: + self._declared_identifiers = [] + self._undeclared_identifiers = [] + else: + code = ast.PythonFragment(text, **self.exception_kwargs) + self._declared_identifiers = code.declared_identifiers + self._undeclared_identifiers = code.undeclared_identifiers + + def get_children(self): + return self.nodes + + def declared_identifiers(self): + return self._declared_identifiers + + def undeclared_identifiers(self): + return self._undeclared_identifiers + + def is_ternary(self, keyword): + """return true if the given keyword is a ternary keyword + for this ControlLine""" + + return keyword in { + 'if': set(['else', 'elif']), + 'try': set(['except', 'finally']), + 'for': set(['else']) + }.get(self.keyword, []) + + def __repr__(self): + return "ControlLine(%r, %r, %r, %r)" % ( + self.keyword, + self.text, + self.isend, + (self.lineno, self.pos) + ) + + +class Text(Node): + + """defines plain text in the template.""" + + def __init__(self, content, **kwargs): + super(Text, self).__init__(**kwargs) + self.content = content + + def __repr__(self): + return "Text(%r, %r)" % (self.content, (self.lineno, self.pos)) + + +class Code(Node): + + """defines a Python code block, either inline or module level. + + e.g.:: + + inline: + <% + x = 12 + %> + + module level: + <%! 
+            import logger
+        %>
+
+    """
+
+    def __init__(self, text, ismodule, **kwargs):
+        super(Code, self).__init__(**kwargs)
+        self.text = text
+        self.ismodule = ismodule
+        self.code = ast.PythonCode(text, **self.exception_kwargs)
+
+    def declared_identifiers(self):
+        return self.code.declared_identifiers
+
+    def undeclared_identifiers(self):
+        return self.code.undeclared_identifiers
+
+    def __repr__(self):
+        return "Code(%r, %r, %r)" % (
+            self.text,
+            self.ismodule,
+            (self.lineno, self.pos)
+        )
+
+
+class Comment(Node):
+
+    """defines a comment line.
+
+    # this is a comment
+
+    """
+
+    def __init__(self, text, **kwargs):
+        super(Comment, self).__init__(**kwargs)
+        self.text = text
+
+    def __repr__(self):
+        return "Comment(%r, %r)" % (self.text, (self.lineno, self.pos))
+
+
+class Expression(Node):
+
+    """defines an inline expression.
+
+    ${x+y}
+
+    """
+
+    def __init__(self, text, escapes, **kwargs):
+        super(Expression, self).__init__(**kwargs)
+        self.text = text
+        self.escapes = escapes
+        self.escapes_code = ast.ArgumentList(escapes, **self.exception_kwargs)
+        self.code = ast.PythonCode(text, **self.exception_kwargs)
+
+    def declared_identifiers(self):
+        return []
+
+    def undeclared_identifiers(self):
+        # TODO: make the "filter" shortcut list configurable at parse/gen time
+        return self.code.undeclared_identifiers.union(
+            self.escapes_code.undeclared_identifiers.difference(
+                set(filters.DEFAULT_ESCAPES.keys())
+            )
+        ).difference(self.code.declared_identifiers)
+
+    def __repr__(self):
+        return "Expression(%r, %r, %r)" % (
+            self.text,
+            self.escapes_code.args,
+            (self.lineno, self.pos)
+        )
+
+
+class _TagMeta(type):
+
+    """metaclass to allow Tag to produce a subclass according to
+    its keyword"""
+
+    _classmap = {}
+
+    def __init__(cls, clsname, bases, dict):
+        if getattr(cls, '__keyword__', None) is not None:
+            cls._classmap[cls.__keyword__] = cls
+        super(_TagMeta, cls).__init__(clsname, bases, dict)
+
+    def __call__(cls, keyword, attributes, **kwargs):
+        if ":" in keyword:
+            ns, defname = keyword.split(':')
+            return type.__call__(CallNamespaceTag, ns, defname,
+                                 attributes, **kwargs)
+
+        try:
+            cls = _TagMeta._classmap[keyword]
+        except KeyError:
+            raise exceptions.CompileException(
+                "No such tag: '%s'" % keyword,
+                source=kwargs['source'],
+                lineno=kwargs['lineno'],
+                pos=kwargs['pos'],
+                filename=kwargs['filename']
+            )
+        return type.__call__(cls, keyword, attributes, **kwargs)
+
+
+class Tag(compat.with_metaclass(_TagMeta, Node)):
+
+    """abstract base class for tags.
+
+    <%sometag/>
+
+    <%someothertag>
+        stuff
+    </%someothertag>
+
+    """
+    __keyword__ = None
+
+    def __init__(self, keyword, attributes, expressions,
+                 nonexpressions, required, **kwargs):
+        r"""construct a new Tag instance.
+
+        this constructor is not called directly, and is only called
+        by subclasses.
+
+        :param keyword: the tag keyword
+
+        :param attributes: raw dictionary of attribute key/value pairs
+
+        :param expressions: a set of identifiers that are legal attributes,
+         which can also contain embedded expressions
+
+        :param nonexpressions: a set of identifiers that are legal
+         attributes, which cannot contain embedded expressions
+
+        :param \**kwargs:
+         other arguments passed to the Node superclass (lineno, pos)
+
+        """
+        super(Tag, self).__init__(**kwargs)
+        self.keyword = keyword
+        self.attributes = attributes
+        self._parse_attributes(expressions, nonexpressions)
+        missing = [r for r in required if r not in self.parsed_attributes]
+        if len(missing):
+            raise exceptions.CompileException(
+                "Missing attribute(s): %s" %
+                ",".join([repr(m) for m in missing]),
+                **self.exception_kwargs)
+        self.parent = None
+        self.nodes = []
+
+    def is_root(self):
+        return self.parent is None
+
+    def get_children(self):
+        return self.nodes
+
+    def _parse_attributes(self, expressions, nonexpressions):
+        undeclared_identifiers = set()
+        self.parsed_attributes = {}
+        for key in self.attributes:
+            if key in expressions:
+                expr = []
+                for x in re.compile(r'(\${.+?})',
+                                    re.S).split(self.attributes[key]):
+                    m = re.compile(r'^\${(.+?)}$', re.S).match(x)
+                    if m:
+                        code = ast.PythonCode(m.group(1).rstrip(),
+                                              **self.exception_kwargs)
+                        # we aren't discarding "declared_identifiers" here,
+                        # which we do so that list comprehension-declared
+                        # variables aren't counted.  As yet can't find a
+                        # condition that requires it here.
+                        undeclared_identifiers = \
+                            undeclared_identifiers.union(
+                                code.undeclared_identifiers)
+                        expr.append('(%s)' % m.group(1))
+                    else:
+                        if x:
+                            expr.append(repr(x))
+                self.parsed_attributes[key] = " + ".join(expr) or repr('')
+            elif key in nonexpressions:
+                if re.search(r'\${.+?}', self.attributes[key]):
+                    raise exceptions.CompileException(
+                        "Attribute '%s' in tag '%s' does not allow embedded "
+                        "expressions" % (key, self.keyword),
+                        **self.exception_kwargs)
+                self.parsed_attributes[key] = repr(self.attributes[key])
+            else:
+                raise exceptions.CompileException(
+                    "Invalid attribute for tag '%s': '%s'" %
+                    (self.keyword, key),
+                    **self.exception_kwargs)
+        self.expression_undeclared_identifiers = undeclared_identifiers
+
+    def declared_identifiers(self):
+        return []
+
+    def undeclared_identifiers(self):
+        return self.expression_undeclared_identifiers
+
+    def __repr__(self):
+        return "%s(%r, %s, %r, %r)" % (self.__class__.__name__,
+                                       self.keyword,
+                                       util.sorted_dict_repr(self.attributes),
+                                       (self.lineno, self.pos),
+                                       self.nodes
+                                       )
+
+
+class IncludeTag(Tag):
+    __keyword__ = 'include'
+
+    def __init__(self, keyword, attributes, **kwargs):
+        super(IncludeTag, self).__init__(
+            keyword,
+            attributes,
+            ('file', 'import', 'args'),
+            (), ('file',), **kwargs)
+        self.page_args = ast.PythonCode(
+            "__DUMMY(%s)" % attributes.get('args', ''),
+            **self.exception_kwargs)
+
+    def declared_identifiers(self):
+        return []
+
+    def undeclared_identifiers(self):
+        identifiers = self.page_args.undeclared_identifiers.\
+            difference(set(["__DUMMY"])).\
+            difference(self.page_args.declared_identifiers)
+        return identifiers.union(super(IncludeTag, self).
+ undeclared_identifiers()) + + +class NamespaceTag(Tag): + __keyword__ = 'namespace' + + def __init__(self, keyword, attributes, **kwargs): + super(NamespaceTag, self).__init__( + keyword, attributes, + ('file',), + ('name', 'inheritable', + 'import', 'module'), + (), **kwargs) + + self.name = attributes.get('name', '__anon_%s' % hex(abs(id(self)))) + if 'name' not in attributes and 'import' not in attributes: + raise exceptions.CompileException( + "'name' and/or 'import' attributes are required " + "for <%namespace>", + **self.exception_kwargs) + if 'file' in attributes and 'module' in attributes: + raise exceptions.CompileException( + "<%namespace> may only have one of 'file' or 'module'", + **self.exception_kwargs + ) + + def declared_identifiers(self): + return [] + + +class TextTag(Tag): + __keyword__ = 'text' + + def __init__(self, keyword, attributes, **kwargs): + super(TextTag, self).__init__( + keyword, + attributes, (), + ('filter'), (), **kwargs) + self.filter_args = ast.ArgumentList( + attributes.get('filter', ''), + **self.exception_kwargs) + + def undeclared_identifiers(self): + return self.filter_args.\ + undeclared_identifiers.\ + difference(filters.DEFAULT_ESCAPES.keys()).union( + self.expression_undeclared_identifiers + ) + + +class DefTag(Tag): + __keyword__ = 'def' + + def __init__(self, keyword, attributes, **kwargs): + expressions = ['buffered', 'cached'] + [ + c for c in attributes if c.startswith('cache_')] + + super(DefTag, self).__init__( + keyword, + attributes, + expressions, + ('name', 'filter', 'decorator'), + ('name',), + **kwargs) + name = attributes['name'] + if re.match(r'^[\w_]+$', name): + raise exceptions.CompileException( + "Missing parenthesis in %def", + **self.exception_kwargs) + self.function_decl = ast.FunctionDecl("def " + name + ":pass", + **self.exception_kwargs) + self.name = self.function_decl.funcname + self.decorator = attributes.get('decorator', '') + self.filter_args = ast.ArgumentList( + attributes.get('filter', ''), + **self.exception_kwargs) + + is_anonymous = False + is_block = False + + @property + def funcname(self): + return self.function_decl.funcname + + def get_argument_expressions(self, **kw): + return self.function_decl.get_argument_expressions(**kw) + + def declared_identifiers(self): + return self.function_decl.allargnames + + def undeclared_identifiers(self): + res = [] + for c in self.function_decl.defaults: + res += list(ast.PythonCode(c, **self.exception_kwargs). + undeclared_identifiers) + return set(res).union( + self.filter_args. + undeclared_identifiers. 
+ difference(filters.DEFAULT_ESCAPES.keys()) + ).union( + self.expression_undeclared_identifiers + ).difference( + self.function_decl.allargnames + ) + + +class BlockTag(Tag): + __keyword__ = 'block' + + def __init__(self, keyword, attributes, **kwargs): + expressions = ['buffered', 'cached', 'args'] + [ + c for c in attributes if c.startswith('cache_')] + + super(BlockTag, self).__init__( + keyword, + attributes, + expressions, + ('name', 'filter', 'decorator'), + (), + **kwargs) + name = attributes.get('name') + if name and not re.match(r'^[\w_]+$', name): + raise exceptions.CompileException( + "%block may not specify an argument signature", + **self.exception_kwargs) + if not name and attributes.get('args', None): + raise exceptions.CompileException( + "Only named %blocks may specify args", + **self.exception_kwargs + ) + self.body_decl = ast.FunctionArgs(attributes.get('args', ''), + **self.exception_kwargs) + + self.name = name + self.decorator = attributes.get('decorator', '') + self.filter_args = ast.ArgumentList( + attributes.get('filter', ''), + **self.exception_kwargs) + + is_block = True + + @property + def is_anonymous(self): + return self.name is None + + @property + def funcname(self): + return self.name or "__M_anon_%d" % (self.lineno, ) + + def get_argument_expressions(self, **kw): + return self.body_decl.get_argument_expressions(**kw) + + def declared_identifiers(self): + return self.body_decl.allargnames + + def undeclared_identifiers(self): + return (self.filter_args. + undeclared_identifiers. + difference(filters.DEFAULT_ESCAPES.keys()) + ).union(self.expression_undeclared_identifiers) + + +class CallTag(Tag): + __keyword__ = 'call' + + def __init__(self, keyword, attributes, **kwargs): + super(CallTag, self).__init__(keyword, attributes, + ('args'), ('expr',), ('expr',), **kwargs) + self.expression = attributes['expr'] + self.code = ast.PythonCode(self.expression, **self.exception_kwargs) + self.body_decl = ast.FunctionArgs(attributes.get('args', ''), + **self.exception_kwargs) + + def declared_identifiers(self): + return self.code.declared_identifiers.union(self.body_decl.allargnames) + + def undeclared_identifiers(self): + return self.code.undeclared_identifiers.\ + difference(self.code.declared_identifiers) + + +class CallNamespaceTag(Tag): + + def __init__(self, namespace, defname, attributes, **kwargs): + super(CallNamespaceTag, self).__init__( + namespace + ":" + defname, + attributes, + tuple(attributes.keys()) + ('args', ), + (), + (), + **kwargs) + + self.expression = "%s.%s(%s)" % ( + namespace, + defname, + ",".join(["%s=%s" % (k, v) for k, v in + self.parsed_attributes.items() + if k != 'args']) + ) + self.code = ast.PythonCode(self.expression, **self.exception_kwargs) + self.body_decl = ast.FunctionArgs( + attributes.get('args', ''), + **self.exception_kwargs) + + def declared_identifiers(self): + return self.code.declared_identifiers.union(self.body_decl.allargnames) + + def undeclared_identifiers(self): + return self.code.undeclared_identifiers.\ + difference(self.code.declared_identifiers) + + +class InheritTag(Tag): + __keyword__ = 'inherit' + + def __init__(self, keyword, attributes, **kwargs): + super(InheritTag, self).__init__( + keyword, attributes, + ('file',), (), ('file',), **kwargs) + + +class PageTag(Tag): + __keyword__ = 'page' + + def __init__(self, keyword, attributes, **kwargs): + expressions = \ + ['cached', 'args', 'expression_filter', 'enable_loop'] + \ + [c for c in attributes if c.startswith('cache_')] + + super(PageTag, 
self).__init__(
+            keyword,
+            attributes,
+            expressions,
+            (),
+            (),
+            **kwargs)
+        self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
+                                          **self.exception_kwargs)
+        self.filter_args = ast.ArgumentList(
+            attributes.get('expression_filter', ''),
+            **self.exception_kwargs)
+
+    def declared_identifiers(self):
+        return self.body_decl.allargnames
diff --git a/server/www/packages/packages-windows/x86/mako/pygen.py b/server/www/packages/packages-windows/x86/mako/pygen.py
new file mode 100644
index 0000000..8514e02
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/mako/pygen.py
@@ -0,0 +1,303 @@
+# mako/pygen.py
+# Copyright (C) 2006-2016 the Mako authors and contributors
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""utilities for generating and formatting literal Python code."""
+
+import re
+from mako import exceptions
+
+
+class PythonPrinter(object):
+
+    def __init__(self, stream):
+        # indentation counter
+        self.indent = 0
+
+        # a stack storing information about why we incremented
+        # the indentation counter, to help us determine if we
+        # should decrement it
+        self.indent_detail = []
+
+        # the string of whitespace multiplied by the indent
+        # counter to produce a line
+        self.indentstring = "    "
+
+        # the stream we are writing to
+        self.stream = stream
+
+        # current line number
+        self.lineno = 1
+
+        # a list of lines that represents a buffered "block" of code,
+        # which can be later printed relative to an indent level
+        self.line_buffer = []
+
+        self.in_indent_lines = False
+
+        self._reset_multi_line_flags()
+
+        # mapping of generated python lines to template
+        # source lines
+        self.source_map = {}
+
+    def _update_lineno(self, num):
+        self.lineno += num
+
+    def start_source(self, lineno):
+        if self.lineno not in self.source_map:
+            self.source_map[self.lineno] = lineno
+
+    def write_blanks(self, num):
+        self.stream.write("\n" * num)
+        self._update_lineno(num)
+
+    def write_indented_block(self, block):
+        """print a line or lines of python which already contain indentation.
+
+        The indentation of the total block of lines will be adjusted to that of
+        the current indent level."""
+        self.in_indent_lines = False
+        for l in re.split(r'\r?\n', block):
+            self.line_buffer.append(l)
+            self._update_lineno(1)
+
+    def writelines(self, *lines):
+        """print a series of lines of python."""
+        for line in lines:
+            self.writeline(line)
+
+    def writeline(self, line):
+        """print a line of python, indenting it according to the current
+        indent level.
+
+        this also adjusts the indentation counter according to the
+        content of the line.
+
+        """
+
+        if not self.in_indent_lines:
+            self._flush_adjusted_lines()
+            self.in_indent_lines = True
+
+        if (
+            line is None or
+            re.match(r"^\s*#", line) or
+            re.match(r"^\s*$", line)
+        ):
+            hastext = False
+        else:
+            hastext = True
+
+        is_comment = line and len(line) and line[0] == '#'
+
+        # see if this line should decrease the indentation level
+        if (
+            not is_comment and
+            (not hastext or self._is_unindentor(line))
+        ):
+
+            if self.indent > 0:
+                self.indent -= 1
+                # if the indent_detail stack is empty, the user
+                # probably put extra closures - the resulting
+                # module won't compile.
+                if len(self.indent_detail) == 0:
+                    raise exceptions.SyntaxException(
+                        "Too many whitespace closures")
+                self.indent_detail.pop()
+
+        if line is None:
+            return
+
+        # write the line
+        self.stream.write(self._indent_line(line) + "\n")
+        self._update_lineno(len(line.split("\n")))
+
+        # see if this line should increase the indentation level.
+        # note that a line can both decrease (before printing) and
+        # then increase (after printing) the indentation level.
+
+        if re.search(r":[ \t]*(?:#.*)?$", line):
+            # increment indentation count, and also
+            # keep track of what the keyword was that indented us,
+            # if it is a python compound statement keyword
+            # where we might have to look for an "unindent" keyword
+            match = re.match(r"^\s*(if|try|elif|while|for|with)", line)
+            if match:
+                # it's a "compound" keyword, so we will check for "unindentors"
+                indentor = match.group(1)
+                self.indent += 1
+                self.indent_detail.append(indentor)
+            else:
+                indentor = None
+                # it's not a "compound" keyword.  but let's also
+                # test for valid Python keywords that might be indenting us,
+                # else assume it's a non-indenting line
+                m2 = re.match(r"^\s*(def|class|else|elif|except|finally)",
+                              line)
+                if m2:
+                    self.indent += 1
+                    self.indent_detail.append(indentor)
+
+    def close(self):
+        """close this printer, flushing any remaining lines."""
+        self._flush_adjusted_lines()
+
+    def _is_unindentor(self, line):
+        """return true if the given line is an 'unindentor',
+        relative to the last 'indent' event received.
+
+        """
+
+        # no indentation detail has been pushed on; return False
+        if len(self.indent_detail) == 0:
+            return False
+
+        indentor = self.indent_detail[-1]
+
+        # the last indent keyword we grabbed is not a
+        # compound statement keyword; return False
+        if indentor is None:
+            return False
+
+        # if the current line doesn't have one of the "unindentor" keywords,
+        # return False
+        match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
+        if not match:
+            return False
+
+        # whitespace matches up, we have a compound indentor,
+        # and this line has an unindentor, this
+        # is probably good enough
+        return True
+
+        # should we decide that it's not good enough, here's
+        # more stuff to check.
+        # keyword = match.group(1)
+
+        # match the original indent keyword
+        # for crit in [
+        #    (r'if|elif', r'else|elif'),
+        #    (r'try', r'except|finally|else'),
+        #    (r'while|for', r'else'),
+        # ]:
+        #    if re.match(crit[0], indentor) and re.match(crit[1], keyword):
+        #        return True
+
+        # return False
+
+    def _indent_line(self, line, stripspace=''):
+        """indent the given line according to the current indent level.
+
+        stripspace is a string of space that will be truncated from the
+        start of the line before indenting."""
+
+        return re.sub(r"^%s" % stripspace, self.indentstring
+                      * self.indent, line)
+
+    def _reset_multi_line_flags(self):
+        """reset the flags which would indicate we are in a backslashed
+        or triple-quoted section."""
+
+        self.backslashed, self.triplequoted = False, False
+
+    def _in_multi_line(self, line):
+        """return true if the given line is part of a multi-line block,
+        via backslash or triple-quote."""
+
+        # we are only looking for explicitly joined lines here, not
+        # implicit ones (i.e. brackets, braces etc.).
this is just to + # guard against the possibility of modifying the space inside of + # a literal multiline string with unfortunately placed + # whitespace + + current_state = (self.backslashed or self.triplequoted) + + if re.search(r"\\$", line): + self.backslashed = True + else: + self.backslashed = False + + triples = len(re.findall(r"\"\"\"|\'\'\'", line)) + if triples == 1 or triples % 2 != 0: + self.triplequoted = not self.triplequoted + + return current_state + + def _flush_adjusted_lines(self): + stripspace = None + self._reset_multi_line_flags() + + for entry in self.line_buffer: + if self._in_multi_line(entry): + self.stream.write(entry + "\n") + else: + entry = entry.expandtabs() + if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry): + stripspace = re.match(r"^([ \t]*)", entry).group(1) + self.stream.write(self._indent_line(entry, stripspace) + "\n") + + self.line_buffer = [] + self._reset_multi_line_flags() + + +def adjust_whitespace(text): + """remove the left-whitespace margin of a block of Python code.""" + + state = [False, False] + (backslashed, triplequoted) = (0, 1) + + def in_multi_line(line): + start_state = (state[backslashed] or state[triplequoted]) + + if re.search(r"\\$", line): + state[backslashed] = True + else: + state[backslashed] = False + + def match(reg, t): + m = re.match(reg, t) + if m: + return m, t[len(m.group(0)):] + else: + return None, t + + while line: + if state[triplequoted]: + m, line = match(r"%s" % state[triplequoted], line) + if m: + state[triplequoted] = False + else: + m, line = match(r".*?(?=%s|$)" % state[triplequoted], line) + else: + m, line = match(r'#', line) + if m: + return start_state + + m, line = match(r"\"\"\"|\'\'\'", line) + if m: + state[triplequoted] = m.group(0) + continue + + m, line = match(r".*?(?=\"\"\"|\'\'\'|#|$)", line) + + return start_state + + def _indent_line(line, stripspace=''): + return re.sub(r"^%s" % stripspace, '', line) + + lines = [] + stripspace = None + + for line in re.split(r'\r?\n', text): + if in_multi_line(line): + lines.append(line) + else: + line = line.expandtabs() + if stripspace is None and re.search(r"^[ \t]*[^# \t]", line): + stripspace = re.match(r"^([ \t]*)", line).group(1) + lines.append(_indent_line(line, stripspace)) + return "\n".join(lines) diff --git a/server/www/packages/packages-windows/x86/mako/pyparser.py b/server/www/packages/packages-windows/x86/mako/pyparser.py new file mode 100644 index 0000000..15d0da6 --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/pyparser.py @@ -0,0 +1,233 @@ +# mako/pyparser.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Handles parsing of Python code. + +Parsing to AST is done via _ast on Python > 2.5, otherwise the compiler +module is used. 
+""" + +from mako import exceptions, util, compat +from mako.compat import arg_stringname +import operator + +if compat.py3k: + # words that cannot be assigned to (notably + # smaller than the total keys in __builtins__) + reserved = set(['True', 'False', 'None', 'print']) + + # the "id" attribute on a function node + arg_id = operator.attrgetter('arg') +else: + # words that cannot be assigned to (notably + # smaller than the total keys in __builtins__) + reserved = set(['True', 'False', 'None']) + + # the "id" attribute on a function node + arg_id = operator.attrgetter('id') + +import _ast +util.restore__ast(_ast) +from mako import _ast_util + + +def parse(code, mode='exec', **exception_kwargs): + """Parse an expression into AST""" + + try: + return _ast_util.parse(code, '', mode) + except Exception: + raise exceptions.SyntaxException( + "(%s) %s (%r)" % ( + compat.exception_as().__class__.__name__, + compat.exception_as(), + code[0:50] + ), **exception_kwargs) + + +class FindIdentifiers(_ast_util.NodeVisitor): + + def __init__(self, listener, **exception_kwargs): + self.in_function = False + self.in_assign_targets = False + self.local_ident_stack = set() + self.listener = listener + self.exception_kwargs = exception_kwargs + + def _add_declared(self, name): + if not self.in_function: + self.listener.declared_identifiers.add(name) + else: + self.local_ident_stack.add(name) + + def visit_ClassDef(self, node): + self._add_declared(node.name) + + def visit_Assign(self, node): + + # flip around the visiting of Assign so the expression gets + # evaluated first, in the case of a clause like "x=x+5" (x + # is undeclared) + + self.visit(node.value) + in_a = self.in_assign_targets + self.in_assign_targets = True + for n in node.targets: + self.visit(n) + self.in_assign_targets = in_a + + if compat.py3k: + + # ExceptHandler is in Python 2, but this block only works in + # Python 3 (and is required there) + + def visit_ExceptHandler(self, node): + if node.name is not None: + self._add_declared(node.name) + if node.type is not None: + self.visit(node.type) + for statement in node.body: + self.visit(statement) + + def visit_Lambda(self, node, *args): + self._visit_function(node, True) + + def visit_FunctionDef(self, node): + self._add_declared(node.name) + self._visit_function(node, False) + + def _expand_tuples(self, args): + for arg in args: + if isinstance(arg, _ast.Tuple): + for n in arg.elts: + yield n + else: + yield arg + + def _visit_function(self, node, islambda): + + # push function state onto stack. dont log any more + # identifiers as "declared" until outside of the function, + # but keep logging identifiers as "undeclared". 
+        # argument names in each function header so they aren't
+        # counted as "undeclared"
+
+        inf = self.in_function
+        self.in_function = True
+
+        local_ident_stack = self.local_ident_stack
+        self.local_ident_stack = local_ident_stack.union([
+            arg_id(arg) for arg in self._expand_tuples(node.args.args)
+        ])
+        if islambda:
+            self.visit(node.body)
+        else:
+            for n in node.body:
+                self.visit(n)
+        self.in_function = inf
+        self.local_ident_stack = local_ident_stack
+
+    def visit_For(self, node):
+
+        # flip around visit
+
+        self.visit(node.iter)
+        self.visit(node.target)
+        for statement in node.body:
+            self.visit(statement)
+        for statement in node.orelse:
+            self.visit(statement)
+
+    def visit_Name(self, node):
+        if isinstance(node.ctx, _ast.Store):
+            # this is equivalent to visit_AssName in
+            # compiler
+            self._add_declared(node.id)
+        elif node.id not in reserved and node.id \
+                not in self.listener.declared_identifiers and node.id \
+                not in self.local_ident_stack:
+            self.listener.undeclared_identifiers.add(node.id)
+
+    def visit_Import(self, node):
+        for name in node.names:
+            if name.asname is not None:
+                self._add_declared(name.asname)
+            else:
+                self._add_declared(name.name.split('.')[0])
+
+    def visit_ImportFrom(self, node):
+        for name in node.names:
+            if name.asname is not None:
+                self._add_declared(name.asname)
+            else:
+                if name.name == '*':
+                    raise exceptions.CompileException(
+                        "'import *' is not supported, since all identifier "
+                        "names must be explicitly declared.  Please use the "
+                        "form 'from <modulename> import <name1>, <name2>, "
+                        "...' instead.", **self.exception_kwargs)
+                self._add_declared(name.name)
+
+
+class FindTuple(_ast_util.NodeVisitor):
+
+    def __init__(self, listener, code_factory, **exception_kwargs):
+        self.listener = listener
+        self.exception_kwargs = exception_kwargs
+        self.code_factory = code_factory
+
+    def visit_Tuple(self, node):
+        for n in node.elts:
+            p = self.code_factory(n, **self.exception_kwargs)
+            self.listener.codeargs.append(p)
+            self.listener.args.append(ExpressionGenerator(n).value())
+            self.listener.declared_identifiers = \
+                self.listener.declared_identifiers.union(
+                    p.declared_identifiers)
+            self.listener.undeclared_identifiers = \
+                self.listener.undeclared_identifiers.union(
+                    p.undeclared_identifiers)
+
+
+class ParseFunc(_ast_util.NodeVisitor):
+
+    def __init__(self, listener, **exception_kwargs):
+        self.listener = listener
+        self.exception_kwargs = exception_kwargs
+
+    def visit_FunctionDef(self, node):
+        self.listener.funcname = node.name
+
+        argnames = [arg_id(arg) for arg in node.args.args]
+        if node.args.vararg:
+            argnames.append(arg_stringname(node.args.vararg))
+
+        if compat.py2k:
+            # kw-only args don't exist in Python 2
+            kwargnames = []
+        else:
+            kwargnames = [arg_id(arg) for arg in node.args.kwonlyargs]
+        if node.args.kwarg:
+            kwargnames.append(arg_stringname(node.args.kwarg))
+        self.listener.argnames = argnames
+        self.listener.defaults = node.args.defaults  # ast
+        self.listener.kwargnames = kwargnames
+        if compat.py2k:
+            self.listener.kwdefaults = []
+        else:
+            self.listener.kwdefaults = node.args.kw_defaults
+        self.listener.varargs = node.args.vararg
+        self.listener.kwargs = node.args.kwarg
+
+
+class ExpressionGenerator(object):
+
+    def __init__(self, astnode):
+        self.generator = _ast_util.SourceGenerator(' ' * 4)
+        self.generator.visit(astnode)
+
+    def value(self):
+        return ''.join(self.generator.result)
diff --git a/server/www/packages/packages-windows/x86/mako/runtime.py b/server/www/packages/packages-windows/x86/mako/runtime.py
new file mode 100644
index 0000000..769541c
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/mako/runtime.py
@@ -0,0 +1,918 @@
+# mako/runtime.py
+# Copyright (C) 2006-2016 the Mako authors and contributors
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""provides runtime services for templates, including Context,
+Namespace, and various helper functions."""
+
+from mako import exceptions, util, compat
+from mako.compat import compat_builtins
+import sys
+
+
+class Context(object):
+
+    """Provides runtime namespace, output buffer, and various
+    callstacks for templates.
+
+    See :ref:`runtime_toplevel` for detail on the usage of
+    :class:`.Context`.
+
+    """
+
+    def __init__(self, buffer, **data):
+        self._buffer_stack = [buffer]
+
+        self._data = data
+
+        self._kwargs = data.copy()
+        self._with_template = None
+        self._outputting_as_unicode = None
+        self.namespaces = {}
+
+        # "capture" function which proxies to the
+        # generic "capture" function
+        self._data['capture'] = compat.partial(capture, self)
+
+        # "caller" stack used by def calls with content
+        self.caller_stack = self._data['caller'] = CallerStack()
+
+    def _set_with_template(self, t):
+        self._with_template = t
+        illegal_names = t.reserved_names.intersection(self._data)
+        if illegal_names:
+            raise exceptions.NameConflictError(
+                "Reserved words passed to render(): %s" %
+                ", ".join(illegal_names))
+
+    @property
+    def lookup(self):
+        """Return the :class:`.TemplateLookup` associated
+        with this :class:`.Context`.
+
+        """
+        return self._with_template.lookup
+
+    @property
+    def kwargs(self):
+        """Return the dictionary of top level keyword arguments associated
+        with this :class:`.Context`.
+
+        This dictionary only includes the top-level arguments passed to
+        :meth:`.Template.render`.  It does not include names produced within
+        the template execution such as local variable names or special names
+        such as ``self``, ``next``, etc.
+
+        The purpose of this dictionary is primarily for the case that
+        a :class:`.Template` accepts arguments via its ``<%page>`` tag,
+        which are normally expected to be passed via :meth:`.Template.render`,
+        except the template is being called in an inheritance context,
+        using the ``body()`` method.  :attr:`.Context.kwargs` can then be
+        used to propagate these arguments to the inheriting template::
+
+            ${next.body(**context.kwargs)}
+
+        """
+        return self._kwargs.copy()
+
+    def push_caller(self, caller):
+        """Push a ``caller`` callable onto the callstack for
+        this :class:`.Context`."""
+
+        self.caller_stack.append(caller)
+
+    def pop_caller(self):
+        """Pop a ``caller`` callable off the callstack for this
+        :class:`.Context`."""
+
+        del self.caller_stack[-1]
+
+    def keys(self):
+        """Return a list of all names established in this :class:`.Context`."""
+
+        return list(self._data.keys())
+
+    def __getitem__(self, key):
+        if key in self._data:
+            return self._data[key]
+        else:
+            return compat_builtins.__dict__[key]
+
+    def _push_writer(self):
+        """push a capturing buffer onto this Context and return
+        the new writer function."""
+
+        buf = util.FastEncodingBuffer()
+        self._buffer_stack.append(buf)
+        return buf.write
+
+    def _pop_buffer_and_writer(self):
+        """pop the most recent capturing buffer from this Context
+        and return the current writer after the pop.
+
+        """
+
+        buf = self._buffer_stack.pop()
+        return buf, self._buffer_stack[-1].write
+
+    def _push_buffer(self):
+        """push a capturing buffer onto this Context."""
+
+        self._push_writer()
+
+    def _pop_buffer(self):
+        """pop the most recent capturing buffer from this Context."""
+
+        return self._buffer_stack.pop()
+
+    def get(self, key, default=None):
+        """Return a value from this :class:`.Context`."""
+
+        return self._data.get(key, compat_builtins.__dict__.get(key, default))
+
+    def write(self, string):
+        """Write a string to this :class:`.Context` object's
+        underlying output buffer."""
+
+        self._buffer_stack[-1].write(string)
+
+    def writer(self):
+        """Return the current writer function."""
+
+        return self._buffer_stack[-1].write
+
+    def _copy(self):
+        c = Context.__new__(Context)
+        c._buffer_stack = self._buffer_stack
+        c._data = self._data.copy()
+        c._kwargs = self._kwargs
+        c._with_template = self._with_template
+        c._outputting_as_unicode = self._outputting_as_unicode
+        c.namespaces = self.namespaces
+        c.caller_stack = self.caller_stack
+        return c
+
+    def _locals(self, d):
+        """Create a new :class:`.Context` with a copy of this
+        :class:`.Context`'s current state,
+        updated with the given dictionary.
+
+        The :attr:`.Context.kwargs` collection remains
+        unaffected.
+
+
+        """
+
+        if not d:
+            return self
+        c = self._copy()
+        c._data.update(d)
+        return c
+
+    def _clean_inheritance_tokens(self):
+        """create a new copy of this :class:`.Context`, with
+        tokens related to inheritance state removed."""
+
+        c = self._copy()
+        x = c._data
+        x.pop('self', None)
+        x.pop('parent', None)
+        x.pop('next', None)
+        return c
+
+
+class CallerStack(list):
+
+    def __init__(self):
+        self.nextcaller = None
+
+    def __nonzero__(self):
+        return self.__bool__()
+
+    def __bool__(self):
+        return len(self) and self._get_caller() and True or False
+
+    def _get_caller(self):
+        # this method can be removed once
+        # codegen MAGIC_NUMBER moves past 7
+        return self[-1]
+
+    def __getattr__(self, key):
+        return getattr(self._get_caller(), key)
+
+    def _push_frame(self):
+        frame = self.nextcaller or None
+        self.append(frame)
+        self.nextcaller = None
+        return frame
+
+    def _pop_frame(self):
+        self.nextcaller = self.pop()
+
+
+class Undefined(object):
+
+    """Represents an undefined value in a template.
+
+    All template modules have a constant value
+    ``UNDEFINED`` present which is an instance of this
+    object.
+
+    """
+
+    def __str__(self):
+        raise NameError("Undefined")
+
+    def __nonzero__(self):
+        return self.__bool__()
+
+    def __bool__(self):
+        return False
+
+UNDEFINED = Undefined()
+STOP_RENDERING = ""
+
+
+class LoopStack(object):
+
+    """a stack for LoopContexts that implements the context manager protocol
+    to automatically pop off the top of the stack on context exit
+    """
+
+    def __init__(self):
+        self.stack = []
+
+    def _enter(self, iterable):
+        self._push(iterable)
+        return self._top
+
+    def _exit(self):
+        self._pop()
+        return self._top
+
+    @property
+    def _top(self):
+        if self.stack:
+            return self.stack[-1]
+        else:
+            return self
+
+    def _pop(self):
+        return self.stack.pop()
+
+    def _push(self, iterable):
+        new = LoopContext(iterable)
+        if self.stack:
+            new.parent = self.stack[-1]
+        return self.stack.append(new)
+
+    def __getattr__(self, key):
+        raise exceptions.RuntimeException("No loop context is established")
+
+    def __iter__(self):
+        return iter(self._top)
+
+
+class LoopContext(object):
+
+    """A magic loop variable.
+    Automatically accessible in any ``% for`` block.
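+
+    For example, in template code (an illustrative sketch; ``items`` is
+    assumed to be passed to ``render()``):
+
+    .. sourcecode:: mako
+
+        % for item in items:
+            ${loop.index}: ${item}
+        % endfor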
+ + See the section :ref:`loop_context` for usage + notes. + + :attr:`parent` -> :class:`.LoopContext` or ``None`` + The parent loop, if one exists. + :attr:`index` -> `int` + The 0-based iteration count. + :attr:`reverse_index` -> `int` + The number of iterations remaining. + :attr:`first` -> `bool` + ``True`` on the first iteration, ``False`` otherwise. + :attr:`last` -> `bool` + ``True`` on the last iteration, ``False`` otherwise. + :attr:`even` -> `bool` + ``True`` when ``index`` is even. + :attr:`odd` -> `bool` + ``True`` when ``index`` is odd. + """ + + def __init__(self, iterable): + self._iterable = iterable + self.index = 0 + self.parent = None + + def __iter__(self): + for i in self._iterable: + yield i + self.index += 1 + + @util.memoized_instancemethod + def __len__(self): + return len(self._iterable) + + @property + def reverse_index(self): + return len(self) - self.index - 1 + + @property + def first(self): + return self.index == 0 + + @property + def last(self): + return self.index == len(self) - 1 + + @property + def even(self): + return not self.odd + + @property + def odd(self): + return bool(self.index % 2) + + def cycle(self, *values): + """Cycle through values as the loop progresses. + """ + if not values: + raise ValueError("You must provide values to cycle through") + return values[self.index % len(values)] + + +class _NSAttr(object): + + def __init__(self, parent): + self.__parent = parent + + def __getattr__(self, key): + ns = self.__parent + while ns: + if hasattr(ns.module, key): + return getattr(ns.module, key) + else: + ns = ns.inherits + raise AttributeError(key) + + +class Namespace(object): + + """Provides access to collections of rendering methods, which + can be local, from other templates, or from imported modules. + + To access a particular rendering method referenced by a + :class:`.Namespace`, use plain attribute access: + + .. sourcecode:: mako + + ${some_namespace.foo(x, y, z)} + + :class:`.Namespace` also contains several built-in attributes + described here. + + """ + + def __init__(self, name, context, + callables=None, inherits=None, + populate_self=True, calling_uri=None): + self.name = name + self.context = context + self.inherits = inherits + if callables is not None: + self.callables = dict([(c.__name__, c) for c in callables]) + + callables = () + + module = None + """The Python module referenced by this :class:`.Namespace`. + + If the namespace references a :class:`.Template`, then + this module is the equivalent of ``template.module``, + i.e. the generated module for the template. + + """ + + template = None + """The :class:`.Template` object referenced by this + :class:`.Namespace`, if any. + + """ + + context = None + """The :class:`.Context` object for this :class:`.Namespace`. + + Namespaces are often created with copies of contexts that + contain slightly different data, particularly in inheritance + scenarios. Using the :class:`.Context` off of a :class:`.Namespace` one + can traverse an entire chain of templates that inherit from + one-another. + + """ + + filename = None + """The path of the filesystem file used for this + :class:`.Namespace`'s module or template. + + If this is a pure module-based + :class:`.Namespace`, this evaluates to ``module.__file__``. If a + template-based namespace, it evaluates to the original + template file location. + + """ + + uri = None + """The URI for this :class:`.Namespace`'s template. + + I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`. 
+ + This is the equivalent of :attr:`.Template.uri`. + + """ + + _templateuri = None + + @util.memoized_property + def attr(self): + """Access module level attributes by name. + + This accessor allows templates to supply "scalar" + attributes which are particularly handy in inheritance + relationships. + + .. seealso:: + + :ref:`inheritance_attr` + + :ref:`namespace_attr_for_includes` + + """ + return _NSAttr(self) + + def get_namespace(self, uri): + """Return a :class:`.Namespace` corresponding to the given ``uri``. + + If the given ``uri`` is a relative URI (i.e. it does not + contain a leading slash ``/``), the ``uri`` is adjusted to + be relative to the ``uri`` of the namespace itself. This + method is therefore mostly useful off of the built-in + ``local`` namespace, described in :ref:`namespace_local`. + + In + most cases, a template wouldn't need this function, and + should instead use the ``<%namespace>`` tag to load + namespaces. However, since all ``<%namespace>`` tags are + evaluated before the body of a template ever runs, + this method can be used to locate namespaces using + expressions that were generated within the body code of + the template, or to conditionally use a particular + namespace. + + """ + key = (self, uri) + if key in self.context.namespaces: + return self.context.namespaces[key] + else: + ns = TemplateNamespace(uri, self.context._copy(), + templateuri=uri, + calling_uri=self._templateuri) + self.context.namespaces[key] = ns + return ns + + def get_template(self, uri): + """Return a :class:`.Template` from the given ``uri``. + + The ``uri`` resolution is relative to the ``uri`` of this + :class:`.Namespace` object's :class:`.Template`. + + """ + return _lookup_template(self.context, uri, self._templateuri) + + def get_cached(self, key, **kwargs): + """Return a value from the :class:`.Cache` referenced by this + :class:`.Namespace` object's :class:`.Template`. + + The advantage to this method versus direct access to the + :class:`.Cache` is that the configuration parameters + declared in ``<%page>`` take effect here, thereby calling + up the same configured backend as that configured + by ``<%page>``. + + """ + + return self.cache.get(key, **kwargs) + + @property + def cache(self): + """Return the :class:`.Cache` object referenced + by this :class:`.Namespace` object's + :class:`.Template`. 
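+
+        For example (illustrative only; assumes caching has been
+        configured for the template)::
+
+            value = some_namespace.cache.get('somekey')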
+ + """ + return self.template.cache + + def include_file(self, uri, **kwargs): + """Include a file at the given ``uri``.""" + + _include_file(self.context, uri, self._templateuri, **kwargs) + + def _populate(self, d, l): + for ident in l: + if ident == '*': + for (k, v) in self._get_star(): + d[k] = v + else: + d[ident] = getattr(self, ident) + + def _get_star(self): + if self.callables: + for key in self.callables: + yield (key, self.callables[key]) + + def __getattr__(self, key): + if key in self.callables: + val = self.callables[key] + elif self.inherits: + val = getattr(self.inherits, key) + else: + raise AttributeError( + "Namespace '%s' has no member '%s'" % + (self.name, key)) + setattr(self, key, val) + return val + + +class TemplateNamespace(Namespace): + + """A :class:`.Namespace` specific to a :class:`.Template` instance.""" + + def __init__(self, name, context, template=None, templateuri=None, + callables=None, inherits=None, + populate_self=True, calling_uri=None): + self.name = name + self.context = context + self.inherits = inherits + if callables is not None: + self.callables = dict([(c.__name__, c) for c in callables]) + + if templateuri is not None: + self.template = _lookup_template(context, templateuri, + calling_uri) + self._templateuri = self.template.module._template_uri + elif template is not None: + self.template = template + self._templateuri = template.module._template_uri + else: + raise TypeError("'template' argument is required.") + + if populate_self: + lclcallable, lclcontext = \ + _populate_self_namespace(context, self.template, + self_ns=self) + + @property + def module(self): + """The Python module referenced by this :class:`.Namespace`. + + If the namespace references a :class:`.Template`, then + this module is the equivalent of ``template.module``, + i.e. the generated module for the template. + + """ + return self.template.module + + @property + def filename(self): + """The path of the filesystem file used for this + :class:`.Namespace`'s module or template. + """ + return self.template.filename + + @property + def uri(self): + """The URI for this :class:`.Namespace`'s template. + + I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`. + + This is the equivalent of :attr:`.Template.uri`. 
+ + """ + return self.template.uri + + def _get_star(self): + if self.callables: + for key in self.callables: + yield (key, self.callables[key]) + + def get(key): + callable_ = self.template._get_def_callable(key) + return compat.partial(callable_, self.context) + for k in self.template.module._exports: + yield (k, get(k)) + + def __getattr__(self, key): + if key in self.callables: + val = self.callables[key] + elif self.template.has_def(key): + callable_ = self.template._get_def_callable(key) + val = compat.partial(callable_, self.context) + elif self.inherits: + val = getattr(self.inherits, key) + + else: + raise AttributeError( + "Namespace '%s' has no member '%s'" % + (self.name, key)) + setattr(self, key, val) + return val + + +class ModuleNamespace(Namespace): + + """A :class:`.Namespace` specific to a Python module instance.""" + + def __init__(self, name, context, module, + callables=None, inherits=None, + populate_self=True, calling_uri=None): + self.name = name + self.context = context + self.inherits = inherits + if callables is not None: + self.callables = dict([(c.__name__, c) for c in callables]) + + mod = __import__(module) + for token in module.split('.')[1:]: + mod = getattr(mod, token) + self.module = mod + + @property + def filename(self): + """The path of the filesystem file used for this + :class:`.Namespace`'s module or template. + """ + return self.module.__file__ + + def _get_star(self): + if self.callables: + for key in self.callables: + yield (key, self.callables[key]) + for key in dir(self.module): + if key[0] != '_': + callable_ = getattr(self.module, key) + if compat.callable(callable_): + yield key, compat.partial(callable_, self.context) + + def __getattr__(self, key): + if key in self.callables: + val = self.callables[key] + elif hasattr(self.module, key): + callable_ = getattr(self.module, key) + val = compat.partial(callable_, self.context) + elif self.inherits: + val = getattr(self.inherits, key) + else: + raise AttributeError( + "Namespace '%s' has no member '%s'" % + (self.name, key)) + setattr(self, key, val) + return val + + +def supports_caller(func): + """Apply a caller_stack compatibility decorator to a plain + Python function. + + See the example in :ref:`namespaces_python_modules`. + + """ + + def wrap_stackframe(context, *args, **kwargs): + context.caller_stack._push_frame() + try: + return func(context, *args, **kwargs) + finally: + context.caller_stack._pop_frame() + return wrap_stackframe + + +def capture(context, callable_, *args, **kwargs): + """Execute the given template def, capturing the output into + a buffer. + + See the example in :ref:`namespaces_python_modules`. + + """ + + if not compat.callable(callable_): + raise exceptions.RuntimeException( + "capture() function expects a callable as " + "its argument (i.e. 
capture(func, *args, **kwargs))" + ) + context._push_buffer() + try: + callable_(*args, **kwargs) + finally: + buf = context._pop_buffer() + return buf.getvalue() + + +def _decorate_toplevel(fn): + def decorate_render(render_fn): + def go(context, *args, **kw): + def y(*args, **kw): + return render_fn(context, *args, **kw) + try: + y.__name__ = render_fn.__name__[7:] + except TypeError: + # < Python 2.4 + pass + return fn(y)(context, *args, **kw) + return go + return decorate_render + + +def _decorate_inline(context, fn): + def decorate_render(render_fn): + dec = fn(render_fn) + + def go(*args, **kw): + return dec(context, *args, **kw) + return go + return decorate_render + + +def _include_file(context, uri, calling_uri, **kwargs): + """locate the template from the given uri and include it in + the current output.""" + + template = _lookup_template(context, uri, calling_uri) + (callable_, ctx) = _populate_self_namespace( + context._clean_inheritance_tokens(), + template) + kwargs = _kwargs_for_include(callable_, context._data, **kwargs) + if template.include_error_handler: + try: + callable_(ctx, **kwargs) + except Exception: + result = template.include_error_handler(ctx, compat.exception_as()) + if not result: + compat.reraise(*sys.exc_info()) + else: + callable_(ctx, **kwargs) + + +def _inherit_from(context, uri, calling_uri): + """called by the _inherit method in template modules to set + up the inheritance chain at the start of a template's + execution.""" + + if uri is None: + return None + template = _lookup_template(context, uri, calling_uri) + self_ns = context['self'] + ih = self_ns + while ih.inherits is not None: + ih = ih.inherits + lclcontext = context._locals({'next': ih}) + ih.inherits = TemplateNamespace("self:%s" % template.uri, + lclcontext, + template=template, + populate_self=False) + context._data['parent'] = lclcontext._data['local'] = ih.inherits + callable_ = getattr(template.module, '_mako_inherit', None) + if callable_ is not None: + ret = callable_(template, lclcontext) + if ret: + return ret + + gen_ns = getattr(template.module, '_mako_generate_namespaces', None) + if gen_ns is not None: + gen_ns(context) + return (template.callable_, lclcontext) + + +def _lookup_template(context, uri, relativeto): + lookup = context._with_template.lookup + if lookup is None: + raise exceptions.TemplateLookupException( + "Template '%s' has no TemplateLookup associated" % + context._with_template.uri) + uri = lookup.adjust_uri(uri, relativeto) + try: + return lookup.get_template(uri) + except exceptions.TopLevelLookupException: + raise exceptions.TemplateLookupException(str(compat.exception_as())) + + +def _populate_self_namespace(context, template, self_ns=None): + if self_ns is None: + self_ns = TemplateNamespace('self:%s' % template.uri, + context, template=template, + populate_self=False) + context._data['self'] = context._data['local'] = self_ns + if hasattr(template.module, '_mako_inherit'): + ret = template.module._mako_inherit(template, context) + if ret: + return ret + return (template.callable_, context) + + +def _render(template, callable_, args, data, as_unicode=False): + """create a Context and return the string + output of the given template and template callable.""" + + if as_unicode: + buf = util.FastEncodingBuffer(as_unicode=True) + elif template.bytestring_passthrough: + buf = compat.StringIO() + else: + buf = util.FastEncodingBuffer( + as_unicode=as_unicode, + encoding=template.output_encoding, + errors=template.encoding_errors) + context = Context(buf, **data) 
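+    # bind the new Context to the template being rendered and propagate
+    # the unicode output mode before invoking the render callable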
+ context._outputting_as_unicode = as_unicode + context._set_with_template(template) + + _render_context(template, callable_, context, *args, + **_kwargs_for_callable(callable_, data)) + return context._pop_buffer().getvalue() + + +def _kwargs_for_callable(callable_, data): + argspec = compat.inspect_func_args(callable_) + # for normal pages, **pageargs is usually present + if argspec[2]: + return data + + # for rendering defs from the top level, figure out the args + namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None] + kwargs = {} + for arg in namedargs: + if arg != 'context' and arg in data and arg not in kwargs: + kwargs[arg] = data[arg] + return kwargs + + +def _kwargs_for_include(callable_, data, **kwargs): + argspec = compat.inspect_func_args(callable_) + namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None] + for arg in namedargs: + if arg != 'context' and arg in data and arg not in kwargs: + kwargs[arg] = data[arg] + return kwargs + + +def _render_context(tmpl, callable_, context, *args, **kwargs): + import mako.template as template + # create polymorphic 'self' namespace for this + # template with possibly updated context + if not isinstance(tmpl, template.DefTemplate): + # if main render method, call from the base of the inheritance stack + (inherit, lclcontext) = _populate_self_namespace(context, tmpl) + _exec_template(inherit, lclcontext, args=args, kwargs=kwargs) + else: + # otherwise, call the actual rendering method specified + (inherit, lclcontext) = _populate_self_namespace(context, tmpl.parent) + _exec_template(callable_, context, args=args, kwargs=kwargs) + + +def _exec_template(callable_, context, args=None, kwargs=None): + """execute a rendering callable given the callable, a + Context, and optional explicit arguments + + the contextual Template will be located if it exists, and + the error handling options specified on that Template will + be interpreted here. 
+ """ + template = context._with_template + if template is not None and \ + (template.format_exceptions or template.error_handler): + try: + callable_(context, *args, **kwargs) + except Exception: + _render_error(template, context, compat.exception_as()) + except: + e = sys.exc_info()[0] + _render_error(template, context, e) + else: + callable_(context, *args, **kwargs) + + +def _render_error(template, context, error): + if template.error_handler: + result = template.error_handler(context, error) + if not result: + compat.reraise(*sys.exc_info()) + else: + error_template = exceptions.html_error_template() + if context._outputting_as_unicode: + context._buffer_stack[:] = [ + util.FastEncodingBuffer(as_unicode=True)] + else: + context._buffer_stack[:] = [util.FastEncodingBuffer( + error_template.output_encoding, + error_template.encoding_errors)] + + context._set_with_template(error_template) + error_template.render_context(context, error=error) diff --git a/server/www/packages/packages-windows/x86/mako/template.py b/server/www/packages/packages-windows/x86/mako/template.py new file mode 100644 index 0000000..329632c --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/template.py @@ -0,0 +1,746 @@ +# mako/template.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Provides the Template class, a facade for parsing, generating and executing +template strings, as well as template runtime operations.""" + +from mako.lexer import Lexer +from mako import runtime, util, exceptions, codegen, cache, compat +import os +import re +import shutil +import stat +import sys +import tempfile +import types +import weakref + + +class Template(object): + + r"""Represents a compiled template. + + :class:`.Template` includes a reference to the original + template source (via the :attr:`.source` attribute) + as well as the source code of the + generated Python module (i.e. the :attr:`.code` attribute), + as well as a reference to an actual Python module. + + :class:`.Template` is constructed using either a literal string + representing the template text, or a filename representing a filesystem + path to a source file. + + :param text: textual template source. This argument is mutually + exclusive versus the ``filename`` parameter. + + :param filename: filename of the source template. This argument is + mutually exclusive versus the ``text`` parameter. + + :param buffer_filters: string list of filters to be applied + to the output of ``%def``\ s which are buffered, cached, or otherwise + filtered, after all filters + defined with the ``%def`` itself have been applied. Allows the + creation of default expression filters that let the output + of return-valued ``%def``\ s "opt out" of that filtering via + passing special attributes or objects. + + :param bytestring_passthrough: When ``True``, and ``output_encoding`` is + set to ``None``, and :meth:`.Template.render` is used to render, + the `StringIO` or `cStringIO` buffer will be used instead of the + default "fast" buffer. This allows raw bytestrings in the + output stream, such as in expressions, to pass straight + through to the buffer. This flag is forced + to ``True`` if ``disable_unicode`` is also configured. + + .. versionadded:: 0.4 + Added to provide the same behavior as that of the previous series. 
+
+    :param cache_args: Dictionary of cache configuration arguments that
+     will be passed to the :class:`.CacheImpl`.  See :ref:`caching_toplevel`.
+
+    :param cache_dir:
+
+     .. deprecated:: 0.6
+        Use the ``'dir'`` argument in the ``cache_args`` dictionary.
+        See :ref:`caching_toplevel`.
+
+    :param cache_enabled: Boolean flag which enables caching of this
+     template.  See :ref:`caching_toplevel`.
+
+    :param cache_impl: String name of a :class:`.CacheImpl` caching
+     implementation to use.  Defaults to ``'beaker'``.
+
+    :param cache_type:
+
+     .. deprecated:: 0.6
+        Use the ``'type'`` argument in the ``cache_args`` dictionary.
+        See :ref:`caching_toplevel`.
+
+    :param cache_url:
+
+     .. deprecated:: 0.6
+        Use the ``'url'`` argument in the ``cache_args`` dictionary.
+        See :ref:`caching_toplevel`.
+
+    :param default_filters: List of string filter names that will
+     be applied to all expressions.  See :ref:`filtering_default_filters`.
+
+    :param disable_unicode: Disables all awareness of Python Unicode
+     objects.  See :ref:`unicode_disabled`.
+
+    :param enable_loop: When ``True``, enable the ``loop`` context variable.
+     This can be set to ``False`` to support templates that may
+     be making usage of the name "``loop``".  Individual templates can
+     re-enable the "loop" context by placing the directive
+     ``enable_loop="True"`` inside the ``<%page>`` tag -- see
+     :ref:`migrating_loop`.
+
+    :param encoding_errors: Error parameter passed to ``encode()`` when
+     string encoding is performed. See :ref:`usage_unicode`.
+
+    :param error_handler: Python callable which is called whenever
+     compile or runtime exceptions occur. The callable is passed
+     the current context as well as the exception. If the
+     callable returns ``True``, the exception is considered to
+     be handled, else it is re-raised after the function
+     completes. Is used to provide custom error-rendering
+     functions.
+
+     .. seealso::
+
+        :paramref:`.Template.include_error_handler` - include-specific
+        error handler function
+
+    :param format_exceptions: if ``True``, exceptions which occur during
+     the render phase of this template will be caught and
+     formatted into an HTML error page, which then becomes the
+     rendered result of the :meth:`.render` call. Otherwise,
+     runtime exceptions are propagated outwards.
+
+    :param imports: String list of Python statements, typically individual
+     "import" lines, which will be placed into the module level
+     preamble of all generated Python modules. See the example
+     in :ref:`filtering_default_filters`.
+
+    :param future_imports: String list of names to import from `__future__`.
+     These will be concatenated into a comma-separated string and inserted
+     into the beginning of the template, e.g. ``future_imports=['FOO',
+     'BAR']`` results in ``from __future__ import FOO, BAR``.  If you're
+     interested in using features like the new division operator, you must
+     use future_imports to convey that to the renderer, as otherwise the
+     import will not appear as the first executed statement in the generated
+     code and will therefore not have the desired effect.
+
+    :param include_error_handler: An error handler that runs when this template
+     is included within another one via the ``<%include>`` tag, and raises an
+     error.  Compare to the :paramref:`.Template.error_handler` option.
+
+     .. versionadded:: 1.0.6
+
+     .. seealso::
+
+        :paramref:`.Template.error_handler` - top-level error handler function
+
+    :param input_encoding: Encoding of the template's source code.  Can
+     be used in lieu of the coding comment.
See + :ref:`usage_unicode` as well as :ref:`unicode_toplevel` for + details on source encoding. + + :param lookup: a :class:`.TemplateLookup` instance that will be used + for all file lookups via the ``<%namespace>``, + ``<%include>``, and ``<%inherit>`` tags. See + :ref:`usage_templatelookup`. + + :param module_directory: Filesystem location where generated + Python module files will be placed. + + :param module_filename: Overrides the filename of the generated + Python module file. For advanced usage only. + + :param module_writer: A callable which overrides how the Python + module is written entirely. The callable is passed the + encoded source content of the module and the destination + path to be written to. The default behavior of module writing + uses a tempfile in conjunction with a file move in order + to make the operation atomic. So a user-defined module + writing function that mimics the default behavior would be: + + .. sourcecode:: python + + import tempfile + import os + import shutil + + def module_writer(source, outputpath): + (dest, name) = \\ + tempfile.mkstemp( + dir=os.path.dirname(outputpath) + ) + + os.write(dest, source) + os.close(dest) + shutil.move(name, outputpath) + + from mako.template import Template + mytemplate = Template( + filename="index.html", + module_directory="/path/to/modules", + module_writer=module_writer + ) + + The function is provided for unusual configurations where + certain platform-specific permissions or other special + steps are needed. + + :param output_encoding: The encoding to use when :meth:`.render` + is called. + See :ref:`usage_unicode` as well as :ref:`unicode_toplevel`. + + :param preprocessor: Python callable which will be passed + the full template source before it is parsed. The return + result of the callable will be used as the template source + code. + + :param lexer_cls: A :class:`.Lexer` class used to parse + the template. The :class:`.Lexer` class is used by + default. + + .. versionadded:: 0.7.4 + + :param strict_undefined: Replaces the automatic usage of + ``UNDEFINED`` for any undeclared variables not located in + the :class:`.Context` with an immediate raise of + ``NameError``. The advantage is immediate reporting of + missing variables which include the name. + + .. versionadded:: 0.3.6 + + :param uri: string URI or other identifier for this template. + If not provided, the ``uri`` is generated from the filesystem + path, or from the in-memory identity of a non-file-based + template. The primary usage of the ``uri`` is to provide a key + within :class:`.TemplateLookup`, as well as to generate the + file path of the generated Python module file, if + ``module_directory`` is specified. 
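+
+    A minimal construction/render sketch (illustrative only; ``Template``
+    and :meth:`.render` are the real entry points, the template text is
+    made up):
+
+    .. sourcecode:: python
+
+        from mako.template import Template
+
+        mytemplate = Template("hello, ${name}!")
+        print(mytemplate.render(name="world"))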
+ + """ + + lexer_cls = Lexer + + def __init__(self, + text=None, + filename=None, + uri=None, + format_exceptions=False, + error_handler=None, + lookup=None, + output_encoding=None, + encoding_errors='strict', + module_directory=None, + cache_args=None, + cache_impl='beaker', + cache_enabled=True, + cache_type=None, + cache_dir=None, + cache_url=None, + module_filename=None, + input_encoding=None, + disable_unicode=False, + module_writer=None, + bytestring_passthrough=False, + default_filters=None, + buffer_filters=(), + strict_undefined=False, + imports=None, + future_imports=None, + enable_loop=True, + preprocessor=None, + lexer_cls=None, + include_error_handler=None): + if uri: + self.module_id = re.sub(r'\W', "_", uri) + self.uri = uri + elif filename: + self.module_id = re.sub(r'\W', "_", filename) + drive, path = os.path.splitdrive(filename) + path = os.path.normpath(path).replace(os.path.sep, "/") + self.uri = path + else: + self.module_id = "memory:" + hex(id(self)) + self.uri = self.module_id + + u_norm = self.uri + if u_norm.startswith("/"): + u_norm = u_norm[1:] + u_norm = os.path.normpath(u_norm) + if u_norm.startswith(".."): + raise exceptions.TemplateLookupException( + "Template uri \"%s\" is invalid - " + "it cannot be relative outside " + "of the root path." % self.uri) + + self.input_encoding = input_encoding + self.output_encoding = output_encoding + self.encoding_errors = encoding_errors + self.disable_unicode = disable_unicode + self.bytestring_passthrough = bytestring_passthrough or disable_unicode + self.enable_loop = enable_loop + self.strict_undefined = strict_undefined + self.module_writer = module_writer + + if compat.py3k and disable_unicode: + raise exceptions.UnsupportedError( + "Mako for Python 3 does not " + "support disabling Unicode") + elif output_encoding and disable_unicode: + raise exceptions.UnsupportedError( + "output_encoding must be set to " + "None when disable_unicode is used.") + if default_filters is None: + if compat.py3k or self.disable_unicode: + self.default_filters = ['str'] + else: + self.default_filters = ['unicode'] + else: + self.default_filters = default_filters + self.buffer_filters = buffer_filters + + self.imports = imports + self.future_imports = future_imports + self.preprocessor = preprocessor + + if lexer_cls is not None: + self.lexer_cls = lexer_cls + + # if plain text, compile code in memory only + if text is not None: + (code, module) = _compile_text(self, text, filename) + self._code = code + self._source = text + ModuleInfo(module, None, self, filename, code, text) + elif filename is not None: + # if template filename and a module directory, load + # a filesystem-based module file, generating if needed + if module_filename is not None: + path = module_filename + elif module_directory is not None: + path = os.path.abspath( + os.path.join( + os.path.normpath(module_directory), + u_norm + ".py" + ) + ) + else: + path = None + module = self._compile_from_file(path, filename) + else: + raise exceptions.RuntimeException( + "Template requires text or filename") + + self.module = module + self.filename = filename + self.callable_ = self.module.render_body + self.format_exceptions = format_exceptions + self.error_handler = error_handler + self.include_error_handler = include_error_handler + self.lookup = lookup + + self.module_directory = module_directory + + self._setup_cache_args( + cache_impl, cache_enabled, cache_args, + cache_type, cache_dir, cache_url + ) + + @util.memoized_property + def reserved_names(self): + if 
self.enable_loop: + return codegen.RESERVED_NAMES + else: + return codegen.RESERVED_NAMES.difference(['loop']) + + def _setup_cache_args(self, + cache_impl, cache_enabled, cache_args, + cache_type, cache_dir, cache_url): + self.cache_impl = cache_impl + self.cache_enabled = cache_enabled + if cache_args: + self.cache_args = cache_args + else: + self.cache_args = {} + + # transfer deprecated cache_* args + if cache_type: + self.cache_args['type'] = cache_type + if cache_dir: + self.cache_args['dir'] = cache_dir + if cache_url: + self.cache_args['url'] = cache_url + + def _compile_from_file(self, path, filename): + if path is not None: + util.verify_directory(os.path.dirname(path)) + filemtime = os.stat(filename)[stat.ST_MTIME] + if not os.path.exists(path) or \ + os.stat(path)[stat.ST_MTIME] < filemtime: + data = util.read_file(filename) + _compile_module_file( + self, + data, + filename, + path, + self.module_writer) + module = compat.load_module(self.module_id, path) + del sys.modules[self.module_id] + if module._magic_number != codegen.MAGIC_NUMBER: + data = util.read_file(filename) + _compile_module_file( + self, + data, + filename, + path, + self.module_writer) + module = compat.load_module(self.module_id, path) + del sys.modules[self.module_id] + ModuleInfo(module, path, self, filename, None, None) + else: + # template filename and no module directory, compile code + # in memory + data = util.read_file(filename) + code, module = _compile_text( + self, + data, + filename) + self._source = None + self._code = code + ModuleInfo(module, None, self, filename, code, None) + return module + + @property + def source(self): + """Return the template source code for this :class:`.Template`.""" + + return _get_module_info_from_callable(self.callable_).source + + @property + def code(self): + """Return the module source code for this :class:`.Template`.""" + + return _get_module_info_from_callable(self.callable_).code + + @util.memoized_property + def cache(self): + return cache.Cache(self) + + @property + def cache_dir(self): + return self.cache_args['dir'] + + @property + def cache_url(self): + return self.cache_args['url'] + + @property + def cache_type(self): + return self.cache_args['type'] + + def render(self, *args, **data): + """Render the output of this template as a string. + + If the template specifies an output encoding, the string + will be encoded accordingly, else the output is raw (raw + output uses `cStringIO` and can't handle multibyte + characters). A :class:`.Context` object is created corresponding + to the given data. Arguments that are explicitly declared + by this template's internal rendering method are also + pulled from the given ``*args``, ``**data`` members. + + """ + return runtime._render(self, self.callable_, args, data) + + def render_unicode(self, *args, **data): + """Render the output of this template as a unicode object.""" + + return runtime._render(self, + self.callable_, + args, + data, + as_unicode=True) + + def render_context(self, context, *args, **kwargs): + """Render this :class:`.Template` with the given context. + + The data is written to the context's buffer. 
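+
+        A short sketch of rendering into an explicit context
+        (illustrative; the template text and variable names are made up):
+
+        .. sourcecode:: python
+
+            from io import StringIO
+
+            from mako.runtime import Context
+            from mako.template import Template
+
+            buf = StringIO()
+            t = Template("hello, ${name}!")
+            t.render_context(Context(buf, name="world"))
+            assert buf.getvalue() == "hello, world!"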
+ + """ + if getattr(context, '_with_template', None) is None: + context._set_with_template(self) + runtime._render_context(self, + self.callable_, + context, + *args, + **kwargs) + + def has_def(self, name): + return hasattr(self.module, "render_%s" % name) + + def get_def(self, name): + """Return a def of this template as a :class:`.DefTemplate`.""" + + return DefTemplate(self, getattr(self.module, "render_%s" % name)) + + def list_defs(self): + """return a list of defs in the template. + + .. versionadded:: 1.0.4 + + """ + return [i[7:] for i in dir(self.module) if i[:7] == 'render_'] + + def _get_def_callable(self, name): + return getattr(self.module, "render_%s" % name) + + @property + def last_modified(self): + return self.module._modified_time + + +class ModuleTemplate(Template): + + """A Template which is constructed given an existing Python module. + + e.g.:: + + t = Template("this is a template") + f = file("mymodule.py", "w") + f.write(t.code) + f.close() + + import mymodule + + t = ModuleTemplate(mymodule) + print t.render() + + """ + + def __init__(self, module, + module_filename=None, + template=None, + template_filename=None, + module_source=None, + template_source=None, + output_encoding=None, + encoding_errors='strict', + disable_unicode=False, + bytestring_passthrough=False, + format_exceptions=False, + error_handler=None, + lookup=None, + cache_args=None, + cache_impl='beaker', + cache_enabled=True, + cache_type=None, + cache_dir=None, + cache_url=None, + include_error_handler=None, + ): + self.module_id = re.sub(r'\W', "_", module._template_uri) + self.uri = module._template_uri + self.input_encoding = module._source_encoding + self.output_encoding = output_encoding + self.encoding_errors = encoding_errors + self.disable_unicode = disable_unicode + self.bytestring_passthrough = bytestring_passthrough or disable_unicode + self.enable_loop = module._enable_loop + + if compat.py3k and disable_unicode: + raise exceptions.UnsupportedError( + "Mako for Python 3 does not " + "support disabling Unicode") + elif output_encoding and disable_unicode: + raise exceptions.UnsupportedError( + "output_encoding must be set to " + "None when disable_unicode is used.") + + self.module = module + self.filename = template_filename + ModuleInfo(module, + module_filename, + self, + template_filename, + module_source, + template_source) + + self.callable_ = self.module.render_body + self.format_exceptions = format_exceptions + self.error_handler = error_handler + self.include_error_handler = include_error_handler + self.lookup = lookup + self._setup_cache_args( + cache_impl, cache_enabled, cache_args, + cache_type, cache_dir, cache_url + ) + + +class DefTemplate(Template): + + """A :class:`.Template` which represents a callable def in a parent + template.""" + + def __init__(self, parent, callable_): + self.parent = parent + self.callable_ = callable_ + self.output_encoding = parent.output_encoding + self.module = parent.module + self.encoding_errors = parent.encoding_errors + self.format_exceptions = parent.format_exceptions + self.error_handler = parent.error_handler + self.include_error_handler = parent.include_error_handler + self.enable_loop = parent.enable_loop + self.lookup = parent.lookup + self.bytestring_passthrough = parent.bytestring_passthrough + + def get_def(self, name): + return self.parent.get_def(name) + + +class ModuleInfo(object): + + """Stores information about a module currently loaded into + memory, provides reverse lookups of template source, module + source code based 
on a module's identifier. + + """ + _modules = weakref.WeakValueDictionary() + + def __init__(self, + module, + module_filename, + template, + template_filename, + module_source, + template_source): + self.module = module + self.module_filename = module_filename + self.template_filename = template_filename + self.module_source = module_source + self.template_source = template_source + self._modules[module.__name__] = template._mmarker = self + if module_filename: + self._modules[module_filename] = self + + @classmethod + def get_module_source_metadata(cls, module_source, full_line_map=False): + source_map = re.search( + r"__M_BEGIN_METADATA(.+?)__M_END_METADATA", + module_source, re.S).group(1) + source_map = compat.json.loads(source_map) + source_map['line_map'] = dict( + (int(k), int(v)) + for k, v in source_map['line_map'].items()) + if full_line_map: + f_line_map = source_map['full_line_map'] = [] + line_map = source_map['line_map'] + + curr_templ_line = 1 + for mod_line in range(1, max(line_map)): + if mod_line in line_map: + curr_templ_line = line_map[mod_line] + f_line_map.append(curr_templ_line) + return source_map + + @property + def code(self): + if self.module_source is not None: + return self.module_source + else: + return util.read_python_file(self.module_filename) + + @property + def source(self): + if self.template_source is not None: + if self.module._source_encoding and \ + not isinstance(self.template_source, compat.text_type): + return self.template_source.decode( + self.module._source_encoding) + else: + return self.template_source + else: + data = util.read_file(self.template_filename) + if self.module._source_encoding: + return data.decode(self.module._source_encoding) + else: + return data + + +def _compile(template, text, filename, generate_magic_comment): + lexer = template.lexer_cls(text, + filename, + disable_unicode=template.disable_unicode, + input_encoding=template.input_encoding, + preprocessor=template.preprocessor) + node = lexer.parse() + source = codegen.compile(node, + template.uri, + filename, + default_filters=template.default_filters, + buffer_filters=template.buffer_filters, + imports=template.imports, + future_imports=template.future_imports, + source_encoding=lexer.encoding, + generate_magic_comment=generate_magic_comment, + disable_unicode=template.disable_unicode, + strict_undefined=template.strict_undefined, + enable_loop=template.enable_loop, + reserved_names=template.reserved_names) + return source, lexer + + +def _compile_text(template, text, filename): + identifier = template.module_id + source, lexer = _compile(template, text, filename, + generate_magic_comment=template.disable_unicode) + + cid = identifier + if not compat.py3k and isinstance(cid, compat.text_type): + cid = cid.encode() + module = types.ModuleType(cid) + code = compile(source, cid, 'exec') + + # this exec() works for 2.4->3.3. + exec(code, module.__dict__, module.__dict__) + return (source, module) + + +def _compile_module_file(template, text, filename, outputpath, module_writer): + source, lexer = _compile(template, text, filename, + generate_magic_comment=True) + + if isinstance(source, compat.text_type): + source = source.encode(lexer.encoding or 'ascii') + + if module_writer: + module_writer(source, outputpath) + else: + # make tempfiles in the same location as the ultimate + # location. this ensures they're on the same filesystem, + # avoiding synchronization issues. 
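+        # mkstemp returns an open OS-level descriptor plus the generated
+        # temporary name; write the encoded module source, close the
+        # descriptor, then move the temp file over the target so readers
+        # never observe a partially written module.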
+ (dest, name) = tempfile.mkstemp(dir=os.path.dirname(outputpath)) + + os.write(dest, source) + os.close(dest) + shutil.move(name, outputpath) + + +def _get_module_info_from_callable(callable_): + if compat.py3k: + return _get_module_info(callable_.__globals__['__name__']) + else: + return _get_module_info(callable_.func_globals['__name__']) + + +def _get_module_info(filename): + return ModuleInfo._modules[filename] diff --git a/server/www/packages/packages-windows/x86/mako/util.py b/server/www/packages/packages-windows/x86/mako/util.py new file mode 100644 index 0000000..2f089ff --- /dev/null +++ b/server/www/packages/packages-windows/x86/mako/util.py @@ -0,0 +1,382 @@ +# mako/util.py +# Copyright (C) 2006-2016 the Mako authors and contributors +# +# This module is part of Mako and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +import re +import collections +import codecs +import os +from mako import compat +import operator + + +def update_wrapper(decorated, fn): + decorated.__wrapped__ = fn + decorated.__name__ = fn.__name__ + return decorated + + +class PluginLoader(object): + + def __init__(self, group): + self.group = group + self.impls = {} + + def load(self, name): + if name in self.impls: + return self.impls[name]() + else: + import pkg_resources + for impl in pkg_resources.iter_entry_points( + self.group, + name): + self.impls[name] = impl.load + return impl.load() + else: + from mako import exceptions + raise exceptions.RuntimeException( + "Can't load plugin %s %s" % + (self.group, name)) + + def register(self, name, modulepath, objname): + def load(): + mod = __import__(modulepath) + for token in modulepath.split(".")[1:]: + mod = getattr(mod, token) + return getattr(mod, objname) + self.impls[name] = load + + +def verify_directory(dir): + """create and/or verify a filesystem directory.""" + + tries = 0 + + while not os.path.exists(dir): + try: + tries += 1 + os.makedirs(dir, compat.octal("0775")) + except: + if tries > 5: + raise + + +def to_list(x, default=None): + if x is None: + return default + if not isinstance(x, (list, tuple)): + return [x] + else: + return x + + +class memoized_property(object): + + """A read-only @property that is only evaluated once.""" + + def __init__(self, fget, doc=None): + self.fget = fget + self.__doc__ = doc or fget.__doc__ + self.__name__ = fget.__name__ + + def __get__(self, obj, cls): + if obj is None: + return self + obj.__dict__[self.__name__] = result = self.fget(obj) + return result + + +class memoized_instancemethod(object): + + """Decorate a method memoize its return value. + + Best applied to no-arg methods: memoization is not sensitive to + argument values, and will always return the same value even when + called with different arguments. + + """ + + def __init__(self, fget, doc=None): + self.fget = fget + self.__doc__ = doc or fget.__doc__ + self.__name__ = fget.__name__ + + def __get__(self, obj, cls): + if obj is None: + return self + + def oneshot(*args, **kw): + result = self.fget(obj, *args, **kw) + memo = lambda *a, **kw: result + memo.__name__ = self.__name__ + memo.__doc__ = self.__doc__ + obj.__dict__[self.__name__] = memo + return result + oneshot.__name__ = self.__name__ + oneshot.__doc__ = self.__doc__ + return oneshot + + +class SetLikeDict(dict): + + """a dictionary that has some setlike methods on it""" + + def union(self, other): + """produce a 'union' of this dict and another (at the key level). 
+ + values in the second dict take precedence over that of the first""" + x = SetLikeDict(**self) + x.update(other) + return x + + +class FastEncodingBuffer(object): + + """a very rudimentary buffer that is faster than StringIO, + but doesn't crash on unicode data like cStringIO.""" + + def __init__(self, encoding=None, errors='strict', as_unicode=False): + self.data = collections.deque() + self.encoding = encoding + if as_unicode: + self.delim = compat.u('') + else: + self.delim = '' + self.as_unicode = as_unicode + self.errors = errors + self.write = self.data.append + + def truncate(self): + self.data = collections.deque() + self.write = self.data.append + + def getvalue(self): + if self.encoding: + return self.delim.join(self.data).encode(self.encoding, + self.errors) + else: + return self.delim.join(self.data) + + +class LRUCache(dict): + + """A dictionary-like object that stores a limited number of items, + discarding lesser used items periodically. + + this is a rewrite of LRUCache from Myghty to use a periodic timestamp-based + paradigm so that synchronization is not really needed. the size management + is inexact. + """ + + class _Item(object): + + def __init__(self, key, value): + self.key = key + self.value = value + self.timestamp = compat.time_func() + + def __repr__(self): + return repr(self.value) + + def __init__(self, capacity, threshold=.5): + self.capacity = capacity + self.threshold = threshold + + def __getitem__(self, key): + item = dict.__getitem__(self, key) + item.timestamp = compat.time_func() + return item.value + + def values(self): + return [i.value for i in dict.values(self)] + + def setdefault(self, key, value): + if key in self: + return self[key] + else: + self[key] = value + return value + + def __setitem__(self, key, value): + item = dict.get(self, key) + if item is None: + item = self._Item(key, value) + dict.__setitem__(self, key, item) + else: + item.value = value + self._manage_size() + + def _manage_size(self): + while len(self) > self.capacity + self.capacity * self.threshold: + bytime = sorted(dict.values(self), + key=operator.attrgetter('timestamp'), reverse=True) + for item in bytime[self.capacity:]: + try: + del self[item.key] + except KeyError: + # if we couldn't find a key, most likely some other thread + # broke in on us. loop around and try again + break + +# Regexp to match python magic encoding line +_PYTHON_MAGIC_COMMENT_re = re.compile( + r'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)', + re.VERBOSE) + + +def parse_encoding(fp): + """Deduce the encoding of a Python source file (binary mode) from magic + comment. + + It does this in the same way as the `Python interpreter`__ + + .. __: http://docs.python.org/ref/encodings.html + + The ``fp`` argument should be a seekable file object in binary mode. + """ + pos = fp.tell() + fp.seek(0) + try: + line1 = fp.readline() + has_bom = line1.startswith(codecs.BOM_UTF8) + if has_bom: + line1 = line1[len(codecs.BOM_UTF8):] + + m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode('ascii', 'ignore')) + if not m: + try: + import parser + parser.suite(line1.decode('ascii', 'ignore')) + except (ImportError, SyntaxError): + # Either it's a real syntax error, in which case the source + # is not valid python source, or line2 is a continuation of + # line1, in which case we don't want to scan line2 for a magic + # comment. 
+ pass + else: + line2 = fp.readline() + m = _PYTHON_MAGIC_COMMENT_re.match( + line2.decode('ascii', 'ignore')) + + if has_bom: + if m: + raise SyntaxError( + "python refuses to compile code with both a UTF8" + " byte-order-mark and a magic encoding comment") + return 'utf_8' + elif m: + return m.group(1) + else: + return None + finally: + fp.seek(pos) + + +def sorted_dict_repr(d): + """repr() a dictionary with the keys in order. + + Used by the lexer unit test to compare parse trees based on strings. + + """ + keys = list(d.keys()) + keys.sort() + return "{" + ", ".join(["%r: %r" % (k, d[k]) for k in keys]) + "}" + + +def restore__ast(_ast): + """Attempt to restore the required classes to the _ast module if it + appears to be missing them + """ + if hasattr(_ast, 'AST'): + return + _ast.PyCF_ONLY_AST = 2 << 9 + m = compile("""\ +def foo(): pass +class Bar(object): pass +if False: pass +baz = 'mako' +1 + 2 - 3 * 4 / 5 +6 // 7 % 8 << 9 >> 10 +11 & 12 ^ 13 | 14 +15 and 16 or 17 +-baz + (not +18) - ~17 +baz and 'foo' or 'bar' +(mako is baz == baz) is not baz != mako +mako > baz < mako >= baz <= mako +mako in baz not in mako""", '', 'exec', _ast.PyCF_ONLY_AST) + _ast.Module = type(m) + + for cls in _ast.Module.__mro__: + if cls.__name__ == 'mod': + _ast.mod = cls + elif cls.__name__ == 'AST': + _ast.AST = cls + + _ast.FunctionDef = type(m.body[0]) + _ast.ClassDef = type(m.body[1]) + _ast.If = type(m.body[2]) + + _ast.Name = type(m.body[3].targets[0]) + _ast.Store = type(m.body[3].targets[0].ctx) + _ast.Str = type(m.body[3].value) + + _ast.Sub = type(m.body[4].value.op) + _ast.Add = type(m.body[4].value.left.op) + _ast.Div = type(m.body[4].value.right.op) + _ast.Mult = type(m.body[4].value.right.left.op) + + _ast.RShift = type(m.body[5].value.op) + _ast.LShift = type(m.body[5].value.left.op) + _ast.Mod = type(m.body[5].value.left.left.op) + _ast.FloorDiv = type(m.body[5].value.left.left.left.op) + + _ast.BitOr = type(m.body[6].value.op) + _ast.BitXor = type(m.body[6].value.left.op) + _ast.BitAnd = type(m.body[6].value.left.left.op) + + _ast.Or = type(m.body[7].value.op) + _ast.And = type(m.body[7].value.values[0].op) + + _ast.Invert = type(m.body[8].value.right.op) + _ast.Not = type(m.body[8].value.left.right.op) + _ast.UAdd = type(m.body[8].value.left.right.operand.op) + _ast.USub = type(m.body[8].value.left.left.op) + + _ast.Or = type(m.body[9].value.op) + _ast.And = type(m.body[9].value.values[0].op) + + _ast.IsNot = type(m.body[10].value.ops[0]) + _ast.NotEq = type(m.body[10].value.ops[1]) + _ast.Is = type(m.body[10].value.left.ops[0]) + _ast.Eq = type(m.body[10].value.left.ops[1]) + + _ast.Gt = type(m.body[11].value.ops[0]) + _ast.Lt = type(m.body[11].value.ops[1]) + _ast.GtE = type(m.body[11].value.ops[2]) + _ast.LtE = type(m.body[11].value.ops[3]) + + _ast.In = type(m.body[12].value.ops[0]) + _ast.NotIn = type(m.body[12].value.ops[1]) + + +def read_file(path, mode='rb'): + fp = open(path, mode) + try: + data = fp.read() + return data + finally: + fp.close() + + +def read_python_file(path): + fp = open(path, "rb") + try: + encoding = parse_encoding(fp) + data = fp.read() + if encoding: + data = data.decode(encoding) + return data + finally: + fp.close() diff --git a/server/www/packages/packages-windows/x86/pymysql/__init__.py b/server/www/packages/packages-windows/x86/pymysql/__init__.py new file mode 100644 index 0000000..b79b4b8 --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/__init__.py @@ -0,0 +1,141 @@ +""" +PyMySQL: A pure-Python MySQL client library. 
+ +Copyright (c) 2010-2016 PyMySQL contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" +import sys + +from ._compat import PY2 +from .constants import FIELD_TYPE +from .converters import escape_dict, escape_sequence, escape_string +from .err import ( + Warning, Error, InterfaceError, DataError, + DatabaseError, OperationalError, IntegrityError, InternalError, + NotSupportedError, ProgrammingError, MySQLError) +from .times import ( + Date, Time, Timestamp, + DateFromTicks, TimeFromTicks, TimestampFromTicks) + + +VERSION = (0, 9, 2, None) +if VERSION[3] is not None: + VERSION_STRING = "%d.%d.%d_%s" % VERSION +else: + VERSION_STRING = "%d.%d.%d" % VERSION[:3] +threadsafety = 1 +apilevel = "2.0" +paramstyle = "pyformat" + + +class DBAPISet(frozenset): + + def __ne__(self, other): + if isinstance(other, set): + return frozenset.__ne__(self, other) + else: + return other not in self + + def __eq__(self, other): + if isinstance(other, frozenset): + return frozenset.__eq__(self, other) + else: + return other in self + + def __hash__(self): + return frozenset.__hash__(self) + + +STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING, + FIELD_TYPE.VAR_STRING]) +BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB, + FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB]) +NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT, + FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG, + FIELD_TYPE.TINY, FIELD_TYPE.YEAR]) +DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE]) +TIME = DBAPISet([FIELD_TYPE.TIME]) +TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME]) +DATETIME = TIMESTAMP +ROWID = DBAPISet() + + +def Binary(x): + """Return x as a binary type.""" + if PY2: + return bytearray(x) + else: + return bytes(x) + + +def Connect(*args, **kwargs): + """ + Connect to the database; see connections.Connection.__init__() for + more information. + """ + from .connections import Connection + return Connection(*args, **kwargs) + +from . 
import connections as _orig_conn +if _orig_conn.Connection.__init__.__doc__ is not None: + Connect.__doc__ = _orig_conn.Connection.__init__.__doc__ +del _orig_conn + + +def get_client_info(): # for MySQLdb compatibility + version = VERSION + if VERSION[3] is None: + version = VERSION[:3] + return '.'.join(map(str, version)) + +connect = Connection = Connect + +# we include a doctored version_info here for MySQLdb compatibility +version_info = (1, 3, 12, "final", 0) + +NULL = "NULL" + +__version__ = get_client_info() + +def thread_safe(): + return True # match MySQLdb.thread_safe() + +def install_as_MySQLdb(): + """ + After this function is called, any application that imports MySQLdb or + _mysql will unwittingly actually use pymysql. + """ + sys.modules["MySQLdb"] = sys.modules["_mysql"] = sys.modules["pymysql"] + + +__all__ = [ + 'BINARY', 'Binary', 'Connect', 'Connection', 'DATE', 'Date', + 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks', + 'DataError', 'DatabaseError', 'Error', 'FIELD_TYPE', 'IntegrityError', + 'InterfaceError', 'InternalError', 'MySQLError', 'NULL', 'NUMBER', + 'NotSupportedError', 'DBAPISet', 'OperationalError', 'ProgrammingError', + 'ROWID', 'STRING', 'TIME', 'TIMESTAMP', 'Warning', 'apilevel', 'connect', + 'connections', 'constants', 'converters', 'cursors', + 'escape_dict', 'escape_sequence', 'escape_string', 'get_client_info', + 'paramstyle', 'threadsafety', 'version_info', + + "install_as_MySQLdb", + "NULL", "__version__", +] diff --git a/server/www/packages/packages-windows/x86/pymysql/_auth.py b/server/www/packages/packages-windows/x86/pymysql/_auth.py new file mode 100644 index 0000000..bbb742d --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/_auth.py @@ -0,0 +1,253 @@ +""" +Implements auth methods +""" +from ._compat import text_type, PY2 +from .constants import CLIENT +from .err import OperationalError + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import serialization, hashes +from cryptography.hazmat.primitives.asymmetric import padding + +from functools import partial +import hashlib +import struct + + +DEBUG = False +SCRAMBLE_LENGTH = 20 +sha1_new = partial(hashlib.new, 'sha1') + + +# mysql_native_password +# https://dev.mysql.com/doc/internals/en/secure-password-authentication.html#packet-Authentication::Native41 + + +def scramble_native_password(password, message): + """Scramble used for mysql_native_password""" + if not password: + return b'' + + stage1 = sha1_new(password).digest() + stage2 = sha1_new(stage1).digest() + s = sha1_new() + s.update(message[:SCRAMBLE_LENGTH]) + s.update(stage2) + result = s.digest() + return _my_crypt(result, stage1) + + +def _my_crypt(message1, message2): + result = bytearray(message1) + if PY2: + message2 = bytearray(message2) + + for i in range(len(result)): + result[i] ^= message2[i] + + return bytes(result) + + +# old_passwords support ported from libmysql/password.c +# https://dev.mysql.com/doc/internals/en/old-password-authentication.html + +SCRAMBLE_LENGTH_323 = 8 + + +class RandStruct_323(object): + + def __init__(self, seed1, seed2): + self.max_value = 0x3FFFFFFF + self.seed1 = seed1 % self.max_value + self.seed2 = seed2 % self.max_value + + def my_rnd(self): + self.seed1 = (self.seed1 * 3 + self.seed2) % self.max_value + self.seed2 = (self.seed1 + self.seed2 + 33) % self.max_value + return float(self.seed1) / float(self.max_value) + + +def scramble_old_password(password, message): + """Scramble for old_password""" 
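+    # This vendored module (apparently matching upstream PyMySQL 0.9.2)
+    # never imports io, byte2int or int2byte, which the old_password
+    # helpers below rely on, so this code path raises NameError as
+    # shipped. Bind the missing names on first call so this function and
+    # _hash_password_323 can actually run.
+    import io
+    from .util import byte2int, int2byte
+    globals().setdefault('io', io)
+    globals().setdefault('byte2int', byte2int)
+    globals().setdefault('int2byte', int2byte)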
+ hash_pass = _hash_password_323(password) + hash_message = _hash_password_323(message[:SCRAMBLE_LENGTH_323]) + hash_pass_n = struct.unpack(">LL", hash_pass) + hash_message_n = struct.unpack(">LL", hash_message) + + rand_st = RandStruct_323( + hash_pass_n[0] ^ hash_message_n[0], hash_pass_n[1] ^ hash_message_n[1] + ) + outbuf = io.BytesIO() + for _ in range(min(SCRAMBLE_LENGTH_323, len(message))): + outbuf.write(int2byte(int(rand_st.my_rnd() * 31) + 64)) + extra = int2byte(int(rand_st.my_rnd() * 31)) + out = outbuf.getvalue() + outbuf = io.BytesIO() + for c in out: + outbuf.write(int2byte(byte2int(c) ^ byte2int(extra))) + return outbuf.getvalue() + + +def _hash_password_323(password): + nr = 1345345333 + add = 7 + nr2 = 0x12345671 + + # x in py3 is numbers, p27 is chars + for c in [byte2int(x) for x in password if x not in (' ', '\t', 32, 9)]: + nr ^= (((nr & 63) + add) * c) + (nr << 8) & 0xFFFFFFFF + nr2 = (nr2 + ((nr2 << 8) ^ nr)) & 0xFFFFFFFF + add = (add + c) & 0xFFFFFFFF + + r1 = nr & ((1 << 31) - 1) # kill sign bits + r2 = nr2 & ((1 << 31) - 1) + return struct.pack(">LL", r1, r2) + + +# sha256_password + + +def _roundtrip(conn, send_data): + conn.write_packet(send_data) + pkt = conn._read_packet() + pkt.check_error() + return pkt + + +def _xor_password(password, salt): + password_bytes = bytearray(password) + salt = bytearray(salt) # for PY2 compat. + salt_len = len(salt) + for i in range(len(password_bytes)): + password_bytes[i] ^= salt[i % salt_len] + return bytes(password_bytes) + + +def sha2_rsa_encrypt(password, salt, public_key): + """Encrypt password with salt and public_key. + + Used for sha256_password and caching_sha2_password. + """ + message = _xor_password(password + b'\0', salt) + rsa_key = serialization.load_pem_public_key(public_key, default_backend()) + return rsa_key.encrypt( + message, + padding.OAEP( + mgf=padding.MGF1(algorithm=hashes.SHA1()), + algorithm=hashes.SHA1(), + label=None, + ), + ) + + +def sha256_password_auth(conn, pkt): + if conn._secure: + if DEBUG: + print("sha256: Sending plain password") + data = conn.password + b'\0' + return _roundtrip(conn, data) + + if pkt.is_auth_switch_request(): + conn.salt = pkt.read_all() + if not conn.server_public_key and conn.password: + # Request server public key + if DEBUG: + print("sha256: Requesting server public key") + pkt = _roundtrip(conn, b'\1') + + if pkt.is_extra_auth_data(): + conn.server_public_key = pkt._data[1:] + if DEBUG: + print("Received public key:\n", conn.server_public_key.decode('ascii')) + + if conn.password: + if not conn.server_public_key: + raise OperationalError("Couldn't receive server's public key") + + data = sha2_rsa_encrypt(conn.password, conn.salt, conn.server_public_key) + else: + data = b'' + + return _roundtrip(conn, data) + + +def scramble_caching_sha2(password, nonce): + # (bytes, bytes) -> bytes + """Scramble algorithm used in cached_sha2_password fast path. 
+ + XOR(SHA256(password), SHA256(SHA256(SHA256(password)), nonce)) + """ + if not password: + return b'' + + p1 = hashlib.sha256(password).digest() + p2 = hashlib.sha256(p1).digest() + p3 = hashlib.sha256(p2 + nonce).digest() + + res = bytearray(p1) + if PY2: + p3 = bytearray(p3) + for i in range(len(p3)): + res[i] ^= p3[i] + + return bytes(res) + + +def caching_sha2_password_auth(conn, pkt): + # No password fast path + if not conn.password: + return _roundtrip(conn, b'') + + if pkt.is_auth_switch_request(): + # Try from fast auth + if DEBUG: + print("caching sha2: Trying fast path") + conn.salt = pkt.read_all() + scrambled = scramble_caching_sha2(conn.password, conn.salt) + pkt = _roundtrip(conn, scrambled) + # else: fast auth is tried in initial handshake + + if not pkt.is_extra_auth_data(): + raise OperationalError( + "caching sha2: Unknown packet for fast auth: %s" % pkt._data[:1] + ) + + # magic numbers: + # 2 - request public key + # 3 - fast auth succeeded + # 4 - need full auth + + pkt.advance(1) + n = pkt.read_uint8() + + if n == 3: + if DEBUG: + print("caching sha2: succeeded by fast path.") + pkt = conn._read_packet() + pkt.check_error() # pkt must be OK packet + return pkt + + if n != 4: + raise OperationalError("caching sha2: Unknwon result for fast auth: %s" % n) + + if DEBUG: + print("caching sha2: Trying full auth...") + + if conn._secure: + if DEBUG: + print("caching sha2: Sending plain password via secure connection") + return _roundtrip(conn, conn.password + b'\0') + + if not conn.server_public_key: + pkt = _roundtrip(conn, b'\x02') # Request public key + if not pkt.is_extra_auth_data(): + raise OperationalError( + "caching sha2: Unknown packet for public key: %s" % pkt._data[:1] + ) + + conn.server_public_key = pkt._data[1:] + if DEBUG: + print(conn.server_public_key.decode('ascii')) + + data = sha2_rsa_encrypt(conn.password, conn.salt, conn.server_public_key) + pkt = _roundtrip(conn, data) diff --git a/server/www/packages/packages-windows/x86/pymysql/_compat.py b/server/www/packages/packages-windows/x86/pymysql/_compat.py new file mode 100644 index 0000000..252789e --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/_compat.py @@ -0,0 +1,21 @@ +import sys + +PY2 = sys.version_info[0] == 2 +PYPY = hasattr(sys, 'pypy_translation_info') +JYTHON = sys.platform.startswith('java') +IRONPYTHON = sys.platform == 'cli' +CPYTHON = not PYPY and not JYTHON and not IRONPYTHON + +if PY2: + import __builtin__ + range_type = xrange + text_type = unicode + long_type = long + str_type = basestring + unichr = __builtin__.unichr +else: + range_type = range + text_type = str + long_type = int + str_type = str + unichr = chr diff --git a/server/www/packages/packages-windows/x86/pymysql/_socketio.py b/server/www/packages/packages-windows/x86/pymysql/_socketio.py new file mode 100644 index 0000000..6a11d42 --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/_socketio.py @@ -0,0 +1,134 @@ +""" +SocketIO imported from socket module in Python 3. + +Copyright (c) 2001-2013 Python Software Foundation; All Rights Reserved. +""" + +from socket import * +import io +import errno + +__all__ = ['SocketIO'] + +EINTR = errno.EINTR +_blocking_errnos = (errno.EAGAIN, errno.EWOULDBLOCK) + +class SocketIO(io.RawIOBase): + + """Raw I/O implementation for stream sockets. + + This class supports the makefile() method on sockets. It provides + the raw I/O interface on top of a socket object. + """ + + # One might wonder why not let FileIO do the job instead. 
There are two + # main reasons why FileIO is not adapted: + # - it wouldn't work under Windows (where you can't used read() and + # write() on a socket handle) + # - it wouldn't work with socket timeouts (FileIO would ignore the + # timeout and consider the socket non-blocking) + + # XXX More docs + + def __init__(self, sock, mode): + if mode not in ("r", "w", "rw", "rb", "wb", "rwb"): + raise ValueError("invalid mode: %r" % mode) + io.RawIOBase.__init__(self) + self._sock = sock + if "b" not in mode: + mode += "b" + self._mode = mode + self._reading = "r" in mode + self._writing = "w" in mode + self._timeout_occurred = False + + def readinto(self, b): + """Read up to len(b) bytes into the writable buffer *b* and return + the number of bytes read. If the socket is non-blocking and no bytes + are available, None is returned. + + If *b* is non-empty, a 0 return value indicates that the connection + was shutdown at the other end. + """ + self._checkClosed() + self._checkReadable() + if self._timeout_occurred: + raise IOError("cannot read from timed out object") + while True: + try: + return self._sock.recv_into(b) + except timeout: + self._timeout_occurred = True + raise + except error as e: + n = e.args[0] + if n == EINTR: + continue + if n in _blocking_errnos: + return None + raise + + def write(self, b): + """Write the given bytes or bytearray object *b* to the socket + and return the number of bytes written. This can be less than + len(b) if not all data could be written. If the socket is + non-blocking and no bytes could be written None is returned. + """ + self._checkClosed() + self._checkWritable() + try: + return self._sock.send(b) + except error as e: + # XXX what about EINTR? + if e.args[0] in _blocking_errnos: + return None + raise + + def readable(self): + """True if the SocketIO is open for reading. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._reading + + def writable(self): + """True if the SocketIO is open for writing. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._writing + + def seekable(self): + """True if the SocketIO is open for seeking. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return super().seekable() + + def fileno(self): + """Return the file descriptor of the underlying socket. + """ + self._checkClosed() + return self._sock.fileno() + + @property + def name(self): + if not self.closed: + return self.fileno() + else: + return -1 + + @property + def mode(self): + return self._mode + + def close(self): + """Close the SocketIO object. This doesn't close the underlying + socket, except if all references to it have disappeared. 
+ """ + if self.closed: + return + io.RawIOBase.close(self) + self._sock._decref_socketios() + self._sock = None + diff --git a/server/www/packages/packages-windows/x86/pymysql/charset.py b/server/www/packages/packages-windows/x86/pymysql/charset.py new file mode 100644 index 0000000..968376c --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/charset.py @@ -0,0 +1,270 @@ +MBLENGTH = { + 8:1, + 33:3, + 88:2, + 91:2 + } + + +class Charset(object): + def __init__(self, id, name, collation, is_default): + self.id, self.name, self.collation = id, name, collation + self.is_default = is_default == 'Yes' + + def __repr__(self): + return "Charset(id=%s, name=%r, collation=%r)" % ( + self.id, self.name, self.collation) + + @property + def encoding(self): + name = self.name + if name == 'utf8mb4': + return 'utf8' + return name + + @property + def is_binary(self): + return self.id == 63 + + +class Charsets: + def __init__(self): + self._by_id = {} + + def add(self, c): + self._by_id[c.id] = c + + def by_id(self, id): + return self._by_id[id] + + def by_name(self, name): + name = name.lower() + for c in self._by_id.values(): + if c.name == name and c.is_default: + return c + +_charsets = Charsets() +""" +Generated with: + +mysql -N -s -e "select id, character_set_name, collation_name, is_default +from information_schema.collations order by id;" | python -c "import sys +for l in sys.stdin.readlines(): + id, name, collation, is_default = l.split(chr(9)) + print '_charsets.add(Charset(%s, \'%s\', \'%s\', \'%s\'))' \ + % (id, name, collation, is_default.strip()) +" + +""" +_charsets.add(Charset(1, 'big5', 'big5_chinese_ci', 'Yes')) +_charsets.add(Charset(2, 'latin2', 'latin2_czech_cs', '')) +_charsets.add(Charset(3, 'dec8', 'dec8_swedish_ci', 'Yes')) +_charsets.add(Charset(4, 'cp850', 'cp850_general_ci', 'Yes')) +_charsets.add(Charset(5, 'latin1', 'latin1_german1_ci', '')) +_charsets.add(Charset(6, 'hp8', 'hp8_english_ci', 'Yes')) +_charsets.add(Charset(7, 'koi8r', 'koi8r_general_ci', 'Yes')) +_charsets.add(Charset(8, 'latin1', 'latin1_swedish_ci', 'Yes')) +_charsets.add(Charset(9, 'latin2', 'latin2_general_ci', 'Yes')) +_charsets.add(Charset(10, 'swe7', 'swe7_swedish_ci', 'Yes')) +_charsets.add(Charset(11, 'ascii', 'ascii_general_ci', 'Yes')) +_charsets.add(Charset(12, 'ujis', 'ujis_japanese_ci', 'Yes')) +_charsets.add(Charset(13, 'sjis', 'sjis_japanese_ci', 'Yes')) +_charsets.add(Charset(14, 'cp1251', 'cp1251_bulgarian_ci', '')) +_charsets.add(Charset(15, 'latin1', 'latin1_danish_ci', '')) +_charsets.add(Charset(16, 'hebrew', 'hebrew_general_ci', 'Yes')) +_charsets.add(Charset(18, 'tis620', 'tis620_thai_ci', 'Yes')) +_charsets.add(Charset(19, 'euckr', 'euckr_korean_ci', 'Yes')) +_charsets.add(Charset(20, 'latin7', 'latin7_estonian_cs', '')) +_charsets.add(Charset(21, 'latin2', 'latin2_hungarian_ci', '')) +_charsets.add(Charset(22, 'koi8u', 'koi8u_general_ci', 'Yes')) +_charsets.add(Charset(23, 'cp1251', 'cp1251_ukrainian_ci', '')) +_charsets.add(Charset(24, 'gb2312', 'gb2312_chinese_ci', 'Yes')) +_charsets.add(Charset(25, 'greek', 'greek_general_ci', 'Yes')) +_charsets.add(Charset(26, 'cp1250', 'cp1250_general_ci', 'Yes')) +_charsets.add(Charset(27, 'latin2', 'latin2_croatian_ci', '')) +_charsets.add(Charset(28, 'gbk', 'gbk_chinese_ci', 'Yes')) +_charsets.add(Charset(29, 'cp1257', 'cp1257_lithuanian_ci', '')) +_charsets.add(Charset(30, 'latin5', 'latin5_turkish_ci', 'Yes')) +_charsets.add(Charset(31, 'latin1', 'latin1_german2_ci', '')) +_charsets.add(Charset(32, 'armscii8', 
'armscii8_general_ci', 'Yes')) +_charsets.add(Charset(33, 'utf8', 'utf8_general_ci', 'Yes')) +_charsets.add(Charset(34, 'cp1250', 'cp1250_czech_cs', '')) +_charsets.add(Charset(35, 'ucs2', 'ucs2_general_ci', 'Yes')) +_charsets.add(Charset(36, 'cp866', 'cp866_general_ci', 'Yes')) +_charsets.add(Charset(37, 'keybcs2', 'keybcs2_general_ci', 'Yes')) +_charsets.add(Charset(38, 'macce', 'macce_general_ci', 'Yes')) +_charsets.add(Charset(39, 'macroman', 'macroman_general_ci', 'Yes')) +_charsets.add(Charset(40, 'cp852', 'cp852_general_ci', 'Yes')) +_charsets.add(Charset(41, 'latin7', 'latin7_general_ci', 'Yes')) +_charsets.add(Charset(42, 'latin7', 'latin7_general_cs', '')) +_charsets.add(Charset(43, 'macce', 'macce_bin', '')) +_charsets.add(Charset(44, 'cp1250', 'cp1250_croatian_ci', '')) +_charsets.add(Charset(45, 'utf8mb4', 'utf8mb4_general_ci', 'Yes')) +_charsets.add(Charset(46, 'utf8mb4', 'utf8mb4_bin', '')) +_charsets.add(Charset(47, 'latin1', 'latin1_bin', '')) +_charsets.add(Charset(48, 'latin1', 'latin1_general_ci', '')) +_charsets.add(Charset(49, 'latin1', 'latin1_general_cs', '')) +_charsets.add(Charset(50, 'cp1251', 'cp1251_bin', '')) +_charsets.add(Charset(51, 'cp1251', 'cp1251_general_ci', 'Yes')) +_charsets.add(Charset(52, 'cp1251', 'cp1251_general_cs', '')) +_charsets.add(Charset(53, 'macroman', 'macroman_bin', '')) +_charsets.add(Charset(54, 'utf16', 'utf16_general_ci', 'Yes')) +_charsets.add(Charset(55, 'utf16', 'utf16_bin', '')) +_charsets.add(Charset(57, 'cp1256', 'cp1256_general_ci', 'Yes')) +_charsets.add(Charset(58, 'cp1257', 'cp1257_bin', '')) +_charsets.add(Charset(59, 'cp1257', 'cp1257_general_ci', 'Yes')) +_charsets.add(Charset(60, 'utf32', 'utf32_general_ci', 'Yes')) +_charsets.add(Charset(61, 'utf32', 'utf32_bin', '')) +_charsets.add(Charset(63, 'binary', 'binary', 'Yes')) +_charsets.add(Charset(64, 'armscii8', 'armscii8_bin', '')) +_charsets.add(Charset(65, 'ascii', 'ascii_bin', '')) +_charsets.add(Charset(66, 'cp1250', 'cp1250_bin', '')) +_charsets.add(Charset(67, 'cp1256', 'cp1256_bin', '')) +_charsets.add(Charset(68, 'cp866', 'cp866_bin', '')) +_charsets.add(Charset(69, 'dec8', 'dec8_bin', '')) +_charsets.add(Charset(70, 'greek', 'greek_bin', '')) +_charsets.add(Charset(71, 'hebrew', 'hebrew_bin', '')) +_charsets.add(Charset(72, 'hp8', 'hp8_bin', '')) +_charsets.add(Charset(73, 'keybcs2', 'keybcs2_bin', '')) +_charsets.add(Charset(74, 'koi8r', 'koi8r_bin', '')) +_charsets.add(Charset(75, 'koi8u', 'koi8u_bin', '')) +_charsets.add(Charset(77, 'latin2', 'latin2_bin', '')) +_charsets.add(Charset(78, 'latin5', 'latin5_bin', '')) +_charsets.add(Charset(79, 'latin7', 'latin7_bin', '')) +_charsets.add(Charset(80, 'cp850', 'cp850_bin', '')) +_charsets.add(Charset(81, 'cp852', 'cp852_bin', '')) +_charsets.add(Charset(82, 'swe7', 'swe7_bin', '')) +_charsets.add(Charset(83, 'utf8', 'utf8_bin', '')) +_charsets.add(Charset(84, 'big5', 'big5_bin', '')) +_charsets.add(Charset(85, 'euckr', 'euckr_bin', '')) +_charsets.add(Charset(86, 'gb2312', 'gb2312_bin', '')) +_charsets.add(Charset(87, 'gbk', 'gbk_bin', '')) +_charsets.add(Charset(88, 'sjis', 'sjis_bin', '')) +_charsets.add(Charset(89, 'tis620', 'tis620_bin', '')) +_charsets.add(Charset(90, 'ucs2', 'ucs2_bin', '')) +_charsets.add(Charset(91, 'ujis', 'ujis_bin', '')) +_charsets.add(Charset(92, 'geostd8', 'geostd8_general_ci', 'Yes')) +_charsets.add(Charset(93, 'geostd8', 'geostd8_bin', '')) +_charsets.add(Charset(94, 'latin1', 'latin1_spanish_ci', '')) +_charsets.add(Charset(95, 'cp932', 'cp932_japanese_ci', 'Yes')) 
+_charsets.add(Charset(96, 'cp932', 'cp932_bin', '')) +_charsets.add(Charset(97, 'eucjpms', 'eucjpms_japanese_ci', 'Yes')) +_charsets.add(Charset(98, 'eucjpms', 'eucjpms_bin', '')) +_charsets.add(Charset(99, 'cp1250', 'cp1250_polish_ci', '')) +_charsets.add(Charset(101, 'utf16', 'utf16_unicode_ci', '')) +_charsets.add(Charset(102, 'utf16', 'utf16_icelandic_ci', '')) +_charsets.add(Charset(103, 'utf16', 'utf16_latvian_ci', '')) +_charsets.add(Charset(104, 'utf16', 'utf16_romanian_ci', '')) +_charsets.add(Charset(105, 'utf16', 'utf16_slovenian_ci', '')) +_charsets.add(Charset(106, 'utf16', 'utf16_polish_ci', '')) +_charsets.add(Charset(107, 'utf16', 'utf16_estonian_ci', '')) +_charsets.add(Charset(108, 'utf16', 'utf16_spanish_ci', '')) +_charsets.add(Charset(109, 'utf16', 'utf16_swedish_ci', '')) +_charsets.add(Charset(110, 'utf16', 'utf16_turkish_ci', '')) +_charsets.add(Charset(111, 'utf16', 'utf16_czech_ci', '')) +_charsets.add(Charset(112, 'utf16', 'utf16_danish_ci', '')) +_charsets.add(Charset(113, 'utf16', 'utf16_lithuanian_ci', '')) +_charsets.add(Charset(114, 'utf16', 'utf16_slovak_ci', '')) +_charsets.add(Charset(115, 'utf16', 'utf16_spanish2_ci', '')) +_charsets.add(Charset(116, 'utf16', 'utf16_roman_ci', '')) +_charsets.add(Charset(117, 'utf16', 'utf16_persian_ci', '')) +_charsets.add(Charset(118, 'utf16', 'utf16_esperanto_ci', '')) +_charsets.add(Charset(119, 'utf16', 'utf16_hungarian_ci', '')) +_charsets.add(Charset(120, 'utf16', 'utf16_sinhala_ci', '')) +_charsets.add(Charset(128, 'ucs2', 'ucs2_unicode_ci', '')) +_charsets.add(Charset(129, 'ucs2', 'ucs2_icelandic_ci', '')) +_charsets.add(Charset(130, 'ucs2', 'ucs2_latvian_ci', '')) +_charsets.add(Charset(131, 'ucs2', 'ucs2_romanian_ci', '')) +_charsets.add(Charset(132, 'ucs2', 'ucs2_slovenian_ci', '')) +_charsets.add(Charset(133, 'ucs2', 'ucs2_polish_ci', '')) +_charsets.add(Charset(134, 'ucs2', 'ucs2_estonian_ci', '')) +_charsets.add(Charset(135, 'ucs2', 'ucs2_spanish_ci', '')) +_charsets.add(Charset(136, 'ucs2', 'ucs2_swedish_ci', '')) +_charsets.add(Charset(137, 'ucs2', 'ucs2_turkish_ci', '')) +_charsets.add(Charset(138, 'ucs2', 'ucs2_czech_ci', '')) +_charsets.add(Charset(139, 'ucs2', 'ucs2_danish_ci', '')) +_charsets.add(Charset(140, 'ucs2', 'ucs2_lithuanian_ci', '')) +_charsets.add(Charset(141, 'ucs2', 'ucs2_slovak_ci', '')) +_charsets.add(Charset(142, 'ucs2', 'ucs2_spanish2_ci', '')) +_charsets.add(Charset(143, 'ucs2', 'ucs2_roman_ci', '')) +_charsets.add(Charset(144, 'ucs2', 'ucs2_persian_ci', '')) +_charsets.add(Charset(145, 'ucs2', 'ucs2_esperanto_ci', '')) +_charsets.add(Charset(146, 'ucs2', 'ucs2_hungarian_ci', '')) +_charsets.add(Charset(147, 'ucs2', 'ucs2_sinhala_ci', '')) +_charsets.add(Charset(159, 'ucs2', 'ucs2_general_mysql500_ci', '')) +_charsets.add(Charset(160, 'utf32', 'utf32_unicode_ci', '')) +_charsets.add(Charset(161, 'utf32', 'utf32_icelandic_ci', '')) +_charsets.add(Charset(162, 'utf32', 'utf32_latvian_ci', '')) +_charsets.add(Charset(163, 'utf32', 'utf32_romanian_ci', '')) +_charsets.add(Charset(164, 'utf32', 'utf32_slovenian_ci', '')) +_charsets.add(Charset(165, 'utf32', 'utf32_polish_ci', '')) +_charsets.add(Charset(166, 'utf32', 'utf32_estonian_ci', '')) +_charsets.add(Charset(167, 'utf32', 'utf32_spanish_ci', '')) +_charsets.add(Charset(168, 'utf32', 'utf32_swedish_ci', '')) +_charsets.add(Charset(169, 'utf32', 'utf32_turkish_ci', '')) +_charsets.add(Charset(170, 'utf32', 'utf32_czech_ci', '')) +_charsets.add(Charset(171, 'utf32', 'utf32_danish_ci', '')) +_charsets.add(Charset(172, 'utf32', 
'utf32_lithuanian_ci', '')) +_charsets.add(Charset(173, 'utf32', 'utf32_slovak_ci', '')) +_charsets.add(Charset(174, 'utf32', 'utf32_spanish2_ci', '')) +_charsets.add(Charset(175, 'utf32', 'utf32_roman_ci', '')) +_charsets.add(Charset(176, 'utf32', 'utf32_persian_ci', '')) +_charsets.add(Charset(177, 'utf32', 'utf32_esperanto_ci', '')) +_charsets.add(Charset(178, 'utf32', 'utf32_hungarian_ci', '')) +_charsets.add(Charset(179, 'utf32', 'utf32_sinhala_ci', '')) +_charsets.add(Charset(192, 'utf8', 'utf8_unicode_ci', '')) +_charsets.add(Charset(193, 'utf8', 'utf8_icelandic_ci', '')) +_charsets.add(Charset(194, 'utf8', 'utf8_latvian_ci', '')) +_charsets.add(Charset(195, 'utf8', 'utf8_romanian_ci', '')) +_charsets.add(Charset(196, 'utf8', 'utf8_slovenian_ci', '')) +_charsets.add(Charset(197, 'utf8', 'utf8_polish_ci', '')) +_charsets.add(Charset(198, 'utf8', 'utf8_estonian_ci', '')) +_charsets.add(Charset(199, 'utf8', 'utf8_spanish_ci', '')) +_charsets.add(Charset(200, 'utf8', 'utf8_swedish_ci', '')) +_charsets.add(Charset(201, 'utf8', 'utf8_turkish_ci', '')) +_charsets.add(Charset(202, 'utf8', 'utf8_czech_ci', '')) +_charsets.add(Charset(203, 'utf8', 'utf8_danish_ci', '')) +_charsets.add(Charset(204, 'utf8', 'utf8_lithuanian_ci', '')) +_charsets.add(Charset(205, 'utf8', 'utf8_slovak_ci', '')) +_charsets.add(Charset(206, 'utf8', 'utf8_spanish2_ci', '')) +_charsets.add(Charset(207, 'utf8', 'utf8_roman_ci', '')) +_charsets.add(Charset(208, 'utf8', 'utf8_persian_ci', '')) +_charsets.add(Charset(209, 'utf8', 'utf8_esperanto_ci', '')) +_charsets.add(Charset(210, 'utf8', 'utf8_hungarian_ci', '')) +_charsets.add(Charset(211, 'utf8', 'utf8_sinhala_ci', '')) +_charsets.add(Charset(223, 'utf8', 'utf8_general_mysql500_ci', '')) +_charsets.add(Charset(224, 'utf8mb4', 'utf8mb4_unicode_ci', '')) +_charsets.add(Charset(225, 'utf8mb4', 'utf8mb4_icelandic_ci', '')) +_charsets.add(Charset(226, 'utf8mb4', 'utf8mb4_latvian_ci', '')) +_charsets.add(Charset(227, 'utf8mb4', 'utf8mb4_romanian_ci', '')) +_charsets.add(Charset(228, 'utf8mb4', 'utf8mb4_slovenian_ci', '')) +_charsets.add(Charset(229, 'utf8mb4', 'utf8mb4_polish_ci', '')) +_charsets.add(Charset(230, 'utf8mb4', 'utf8mb4_estonian_ci', '')) +_charsets.add(Charset(231, 'utf8mb4', 'utf8mb4_spanish_ci', '')) +_charsets.add(Charset(232, 'utf8mb4', 'utf8mb4_swedish_ci', '')) +_charsets.add(Charset(233, 'utf8mb4', 'utf8mb4_turkish_ci', '')) +_charsets.add(Charset(234, 'utf8mb4', 'utf8mb4_czech_ci', '')) +_charsets.add(Charset(235, 'utf8mb4', 'utf8mb4_danish_ci', '')) +_charsets.add(Charset(236, 'utf8mb4', 'utf8mb4_lithuanian_ci', '')) +_charsets.add(Charset(237, 'utf8mb4', 'utf8mb4_slovak_ci', '')) +_charsets.add(Charset(238, 'utf8mb4', 'utf8mb4_spanish2_ci', '')) +_charsets.add(Charset(239, 'utf8mb4', 'utf8mb4_roman_ci', '')) +_charsets.add(Charset(240, 'utf8mb4', 'utf8mb4_persian_ci', '')) +_charsets.add(Charset(241, 'utf8mb4', 'utf8mb4_esperanto_ci', '')) +_charsets.add(Charset(242, 'utf8mb4', 'utf8mb4_hungarian_ci', '')) +_charsets.add(Charset(243, 'utf8mb4', 'utf8mb4_sinhala_ci', '')) +_charsets.add(Charset(244, 'utf8mb4', 'utf8mb4_german2_ci', '')) +_charsets.add(Charset(245, 'utf8mb4', 'utf8mb4_croatian_ci', '')) +_charsets.add(Charset(246, 'utf8mb4', 'utf8mb4_unicode_520_ci', '')) +_charsets.add(Charset(247, 'utf8mb4', 'utf8mb4_vietnamese_ci', '')) + + +charset_by_name = _charsets.by_name +charset_by_id = _charsets.by_id + + +def charset_to_encoding(name): + """Convert MySQL's charset name to Python's codec name""" + if name == 'utf8mb4': + return 'utf8' + 
return name
diff --git a/server/www/packages/packages-windows/x86/pymysql/connections.py b/server/www/packages/packages-windows/x86/pymysql/connections.py
new file mode 100644
index 0000000..1e580d2
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/pymysql/connections.py
@@ -0,0 +1,1270 @@
+# Python implementation of the MySQL client-server protocol
+# http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+# Error codes:
+# http://dev.mysql.com/doc/refman/5.5/en/error-messages-client.html
+from __future__ import print_function
+from ._compat import PY2, range_type, text_type, str_type, JYTHON, IRONPYTHON
+
+import errno
+import io
+import os
+import socket
+import struct
+import sys
+import traceback
+import warnings
+
+from . import _auth
+
+from .charset import charset_by_name, charset_by_id
+from .constants import CLIENT, COMMAND, CR, FIELD_TYPE, SERVER_STATUS
+from . import converters
+from .cursors import Cursor
+from .optionfile import Parser
+from .protocol import (
+    dump_packet, MysqlPacket, FieldDescriptorPacket, OKPacketWrapper,
+    EOFPacketWrapper, LoadLocalPacketWrapper
+)
+from .util import byte2int, int2byte
+from . import err, VERSION_STRING
+
+try:
+    import ssl
+    SSL_ENABLED = True
+except ImportError:
+    ssl = None
+    SSL_ENABLED = False
+
+try:
+    import getpass
+    DEFAULT_USER = getpass.getuser()
+    del getpass
+except (ImportError, KeyError):
+    # KeyError occurs when there's no entry in OS database for a current user.
+    DEFAULT_USER = None
+
+DEBUG = False
+
+_py_version = sys.version_info[:2]
+
+if PY2:
+    pass
+elif _py_version < (3, 6):
+    # See http://bugs.python.org/issue24870
+    _surrogateescape_table = [chr(i) if i < 0x80 else chr(i + 0xdc00) for i in range(256)]
+
+    def _fast_surrogateescape(s):
+        return s.decode('latin1').translate(_surrogateescape_table)
+else:
+    def _fast_surrogateescape(s):
+        return s.decode('ascii', 'surrogateescape')
+
+# socket.makefile() in Python 2 is not usable because very inefficient and
+# bad behavior about timeout.
+# XXX: ._socketio doesn't work under IronPython.
+if PY2 and not IRONPYTHON:
+    # read method of file-like returned by sock.makefile() is very slow.
+    # So we copy io-based one from Python 3.
+    from ._socketio import SocketIO
+
+    def _makefile(sock, mode):
+        return io.BufferedReader(SocketIO(sock, mode))
+else:
+    # socket.makefile in Python 3 is nice.
+    def _makefile(sock, mode):
+        return sock.makefile(mode)
+
+
+TEXT_TYPES = {
+    FIELD_TYPE.BIT,
+    FIELD_TYPE.BLOB,
+    FIELD_TYPE.LONG_BLOB,
+    FIELD_TYPE.MEDIUM_BLOB,
+    FIELD_TYPE.STRING,
+    FIELD_TYPE.TINY_BLOB,
+    FIELD_TYPE.VAR_STRING,
+    FIELD_TYPE.VARCHAR,
+    FIELD_TYPE.GEOMETRY,
+}
+
+
+DEFAULT_CHARSET = 'utf8mb4'
+
+MAX_PACKET_LEN = 2**24-1
+
+
+def pack_int24(n):
+    return struct.pack('<I', n)[:3]
+
+
+# https://dev.mysql.com/doc/internals/en/integer.html#packet-Protocol::LengthEncodedInteger
+def lenenc_int(i):
+    if (i < 0):
+        raise ValueError("Encoding %d is less than 0 - no representation in LengthEncodedInteger" % i)
+    elif (i < 0xfb):
+        return int2byte(i)
+    elif (i < (1 << 16)):
+        return b'\xfc' + struct.pack('<H', i)
+    elif (i < (1 << 24)):
+        return b'\xfd' + struct.pack('<I', i)[:3]
+    elif (i < (1 << 64)):
+        return b'\xfe' + struct.pack('<Q', i)
+    else:
+        raise ValueError("Encoding %x is larger than %x - no representation in LengthEncodedInteger" % (i, (1 << 64)))
+
+
+class Connection(object):
+    """
+    Representation of a socket with a mysql server.
+
+    The proper way to get an instance of this class is to call
+    connect().
+
+    Establish a connection to the MySQL database. Accepts several
+    arguments: [the per-parameter documentation was lost in extraction;
+    the keyword arguments of ``__init__`` below carry the same names and
+    defaults].
+
+    See `Connection
+    <https://www.python.org/dev/peps/pep-0249/#connection-objects>`_ in the
+    specification.
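+
+    A minimal usage sketch (illustrative; host name and credentials are
+    placeholders)::
+
+        import pymysql
+
+        conn = pymysql.connect(host='localhost', user='user',
+                               password='passwd', database='db')
+        try:
+            with conn.cursor() as cur:
+                cur.execute("SELECT VERSION()")
+                print(cur.fetchone())
+        finally:
+            conn.close()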
+ """ + + _sock = None + _auth_plugin_name = '' + _closed = False + _secure = False + + def __init__(self, host=None, user=None, password="", + database=None, port=0, unix_socket=None, + charset='', sql_mode=None, + read_default_file=None, conv=None, use_unicode=None, + client_flag=0, cursorclass=Cursor, init_command=None, + connect_timeout=10, ssl=None, read_default_group=None, + compress=None, named_pipe=None, + autocommit=False, db=None, passwd=None, local_infile=False, + max_allowed_packet=16*1024*1024, defer_connect=False, + auth_plugin_map=None, read_timeout=None, write_timeout=None, + bind_address=None, binary_prefix=False, program_name=None, + server_public_key=None): + if use_unicode is None and sys.version_info[0] > 2: + use_unicode = True + + if db is not None and database is None: + database = db + if passwd is not None and not password: + password = passwd + + if compress or named_pipe: + raise NotImplementedError("compress and named_pipe arguments are not supported") + + self._local_infile = bool(local_infile) + if self._local_infile: + client_flag |= CLIENT.LOCAL_FILES + + if read_default_group and not read_default_file: + if sys.platform.startswith("win"): + read_default_file = "c:\\my.ini" + else: + read_default_file = "/etc/my.cnf" + + if read_default_file: + if not read_default_group: + read_default_group = "client" + + cfg = Parser() + cfg.read(os.path.expanduser(read_default_file)) + + def _config(key, arg): + if arg: + return arg + try: + return cfg.get(read_default_group, key) + except Exception: + return arg + + user = _config("user", user) + password = _config("password", password) + host = _config("host", host) + database = _config("database", database) + unix_socket = _config("socket", unix_socket) + port = int(_config("port", port)) + bind_address = _config("bind-address", bind_address) + charset = _config("default-character-set", charset) + if not ssl: + ssl = {} + if isinstance(ssl, dict): + for key in ["ca", "capath", "cert", "key", "cipher"]: + value = _config("ssl-" + key, ssl.get(key)) + if value: + ssl[key] = value + + self.ssl = False + if ssl: + if not SSL_ENABLED: + raise NotImplementedError("ssl module not found") + self.ssl = True + client_flag |= CLIENT.SSL + self.ctx = self._create_ssl_ctx(ssl) + + self.host = host or "localhost" + self.port = port or 3306 + self.user = user or DEFAULT_USER + self.password = password or b"" + if isinstance(self.password, text_type): + self.password = self.password.encode('latin1') + self.db = database + self.unix_socket = unix_socket + self.bind_address = bind_address + if not (0 < connect_timeout <= 31536000): + raise ValueError("connect_timeout should be >0 and <=31536000") + self.connect_timeout = connect_timeout or None + if read_timeout is not None and read_timeout <= 0: + raise ValueError("read_timeout should be >= 0") + self._read_timeout = read_timeout + if write_timeout is not None and write_timeout <= 0: + raise ValueError("write_timeout should be >= 0") + self._write_timeout = write_timeout + if charset: + self.charset = charset + self.use_unicode = True + else: + self.charset = DEFAULT_CHARSET + self.use_unicode = False + + if use_unicode is not None: + self.use_unicode = use_unicode + + self.encoding = charset_by_name(self.charset).encoding + + client_flag |= CLIENT.CAPABILITIES + if self.db: + client_flag |= CLIENT.CONNECT_WITH_DB + + self.client_flag = client_flag + + self.cursorclass = cursorclass + + self._result = None + self._affected_rows = 0 + self.host_info = "Not connected" + + #: specified 
autocommit mode. None means use server default. + self.autocommit_mode = autocommit + + if conv is None: + conv = converters.conversions + + # Need for MySQLdb compatibility. + self.encoders = dict([(k, v) for (k, v) in conv.items() if type(k) is not int]) + self.decoders = dict([(k, v) for (k, v) in conv.items() if type(k) is int]) + self.sql_mode = sql_mode + self.init_command = init_command + self.max_allowed_packet = max_allowed_packet + self._auth_plugin_map = auth_plugin_map or {} + self._binary_prefix = binary_prefix + self.server_public_key = server_public_key + + self._connect_attrs = { + '_client_name': 'pymysql', + '_pid': str(os.getpid()), + '_client_version': VERSION_STRING, + } + if program_name: + self._connect_attrs["program_name"] = program_name + elif sys.argv: + self._connect_attrs["program_name"] = sys.argv[0] + + if defer_connect: + self._sock = None + else: + self.connect() + + def _create_ssl_ctx(self, sslp): + if isinstance(sslp, ssl.SSLContext): + return sslp + ca = sslp.get('ca') + capath = sslp.get('capath') + hasnoca = ca is None and capath is None + ctx = ssl.create_default_context(cafile=ca, capath=capath) + ctx.check_hostname = not hasnoca and sslp.get('check_hostname', True) + ctx.verify_mode = ssl.CERT_NONE if hasnoca else ssl.CERT_REQUIRED + if 'cert' in sslp: + ctx.load_cert_chain(sslp['cert'], keyfile=sslp.get('key')) + if 'cipher' in sslp: + ctx.set_ciphers(sslp['cipher']) + ctx.options |= ssl.OP_NO_SSLv2 + ctx.options |= ssl.OP_NO_SSLv3 + return ctx + + def close(self): + """ + Send the quit message and close the socket. + + See `Connection.close() `_ + in the specification. + + :raise Error: If the connection is already closed. + """ + if self._closed: + raise err.Error("Already closed") + self._closed = True + if self._sock is None: + return + send_data = struct.pack('`_ + in the specification. + """ + self._execute_command(COMMAND.COM_QUERY, "COMMIT") + self._read_ok_packet() + + def rollback(self): + """ + Roll back the current transaction. + + See `Connection.rollback() `_ + in the specification. + """ + self._execute_command(COMMAND.COM_QUERY, "ROLLBACK") + self._read_ok_packet() + + def show_warnings(self): + """Send the "SHOW WARNINGS" SQL command.""" + self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS") + result = MySQLResult(self) + result.read() + return result.rows + + def select_db(self, db): + """ + Set current db. + + :param db: The name of the db. + """ + self._execute_command(COMMAND.COM_INIT_DB, db) + self._read_ok_packet() + + def escape(self, obj, mapping=None): + """Escape whatever value you pass to it. + + Non-standard, for internal use; do not use this in your applications. + """ + if isinstance(obj, str_type): + return "'" + self.escape_string(obj) + "'" + if isinstance(obj, (bytes, bytearray)): + ret = self._quote_bytes(obj) + if self._binary_prefix: + ret = "_binary" + ret + return ret + return converters.escape_item(obj, self.charset, mapping=mapping) + + def literal(self, obj): + """Alias for escape() + + Non-standard, for internal use; do not use this in your applications. 
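escape_string and _quote_bytes (just below) pick a quoting strategy based on the server's NO_BACKSLASH_ESCAPES status flag. A standalone sketch of the two modes; the function names here are illustrative, not part of the driver:

    def escape_no_backslash_mode(s):
        # NO_BACKSLASH_ESCAPES: only the quote character itself is doubled.
        return s.replace("'", "''")

    def escape_default_mode(s):
        # Default mode: backslash is also an escape character.
        return s.replace('\\', '\\\\').replace("'", "\\'")

    assert escape_no_backslash_mode("it's") == "it''s"
    assert escape_default_mode("it's") == "it\\'s"
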
+ """ + return self.escape(obj, self.encoders) + + def escape_string(self, s): + if (self.server_status & + SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES): + return s.replace("'", "''") + return converters.escape_string(s) + + def _quote_bytes(self, s): + if (self.server_status & + SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES): + return "'%s'" % (_fast_surrogateescape(s.replace(b"'", b"''")),) + return converters.escape_bytes(s) + + def cursor(self, cursor=None): + """ + Create a new cursor to execute queries with. + + :param cursor: The type of cursor to create; one of :py:class:`Cursor`, + :py:class:`SSCursor`, :py:class:`DictCursor`, or :py:class:`SSDictCursor`. + None means use Cursor. + """ + if cursor: + return cursor(self) + return self.cursorclass(self) + + def __enter__(self): + """Context manager that returns a Cursor""" + return self.cursor() + + def __exit__(self, exc, value, traceback): + """On successful exit, commit. On exception, rollback""" + if exc: + self.rollback() + else: + self.commit() + + # The following methods are INTERNAL USE ONLY (called from Cursor) + def query(self, sql, unbuffered=False): + # if DEBUG: + # print("DEBUG: sending query:", sql) + if isinstance(sql, text_type) and not (JYTHON or IRONPYTHON): + if PY2: + sql = sql.encode(self.encoding) + else: + sql = sql.encode(self.encoding, 'surrogateescape') + self._execute_command(COMMAND.COM_QUERY, sql) + self._affected_rows = self._read_query_result(unbuffered=unbuffered) + return self._affected_rows + + def next_result(self, unbuffered=False): + self._affected_rows = self._read_query_result(unbuffered=unbuffered) + return self._affected_rows + + def affected_rows(self): + return self._affected_rows + + def kill(self, thread_id): + arg = struct.pack('= 5: + self.client_flag |= CLIENT.MULTI_RESULTS + + if self.user is None: + raise ValueError("Did not specify a username") + + charset_id = charset_by_name(self.charset).id + if isinstance(self.user, text_type): + self.user = self.user.encode(self.encoding) + + data_init = struct.pack('=5.0) + data += authresp + b'\0' + + if self.db and self.server_capabilities & CLIENT.CONNECT_WITH_DB: + if isinstance(self.db, text_type): + self.db = self.db.encode(self.encoding) + data += self.db + b'\0' + + if self.server_capabilities & CLIENT.PLUGIN_AUTH: + data += (plugin_name or b'') + b'\0' + + if self.server_capabilities & CLIENT.CONNECT_ATTRS: + connect_attrs = b'' + for k, v in self._connect_attrs.items(): + k = k.encode('utf8') + connect_attrs += struct.pack('B', len(k)) + k + v = v.encode('utf8') + connect_attrs += struct.pack('B', len(v)) + v + data += struct.pack('B', len(connect_attrs)) + connect_attrs + + self.write_packet(data) + auth_packet = self._read_packet() + + # if authentication method isn't accepted the first byte + # will have the octet 254 + if auth_packet.is_auth_switch_request(): + if DEBUG: print("received auth switch") + # https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest + auth_packet.read_uint8() # 0xfe packet identifier + plugin_name = auth_packet.read_string() + if self.server_capabilities & CLIENT.PLUGIN_AUTH and plugin_name is not None: + auth_packet = self._process_auth(plugin_name, auth_packet) + else: + # send legacy handshake + data = _auth.scramble_old_password(self.password, self.salt) + b'\0' + self.write_packet(data) + auth_packet = self._read_packet() + elif auth_packet.is_extra_auth_data(): + if DEBUG: + print("received extra data") + # 
https://dev.mysql.com/doc/internals/en/successful-authentication.html + if self._auth_plugin_name == "caching_sha2_password": + auth_packet = _auth.caching_sha2_password_auth(self, auth_packet) + elif self._auth_plugin_name == "sha256_password": + auth_packet = _auth.sha256_password_auth(self, auth_packet) + else: + raise err.OperationalError("Received extra packet for auth method %r" % (self._auth_plugin_name,)) + + if DEBUG: print("Authentication succeeded") + + def _process_auth(self, plugin_name, auth_packet): + handler = self._get_auth_plugin_handler(plugin_name) + if handler: + try: + return handler.authenticate(auth_packet) + except AttributeError: + if plugin_name != b'dialog': + raise err.OperationalError(2059, "Authentication plugin '%s'" + " not loaded: %r missing authenticate method" % (plugin_name, type(handler))) + if plugin_name == b"caching_sha2_password": + return _auth.caching_sha2_password_auth(self, auth_packet) + elif plugin_name == b"sha256_password": + return _auth.sha256_password_auth(self, auth_packet) + elif plugin_name == b"mysql_native_password": + data = _auth.scramble_native_password(self.password, auth_packet.read_all()) + elif plugin_name == b"mysql_old_password": + data = _auth.scramble_old_password(self.password, auth_packet.read_all()) + b'\0' + elif plugin_name == b"mysql_clear_password": + # https://dev.mysql.com/doc/internals/en/clear-text-authentication.html + data = self.password + b'\0' + elif plugin_name == b"dialog": + pkt = auth_packet + while True: + flag = pkt.read_uint8() + echo = (flag & 0x06) == 0x02 + last = (flag & 0x01) == 0x01 + prompt = pkt.read_all() + + if prompt == b"Password: ": + self.write_packet(self.password + b'\0') + elif handler: + resp = 'no response - TypeError within plugin.prompt method' + try: + resp = handler.prompt(echo, prompt) + self.write_packet(resp + b'\0') + except AttributeError: + raise err.OperationalError(2059, "Authentication plugin '%s'" \ + " not loaded: %r missing prompt method" % (plugin_name, handler)) + except TypeError: + raise err.OperationalError(2061, "Authentication plugin '%s'" \ + " %r didn't respond with string.
Returned '%r' to prompt %r" % (plugin_name, handler, resp, prompt)) + else: + raise err.OperationalError(2059, "Authentication plugin '%s' (%r) not configured" % (plugin_name, handler)) + pkt = self._read_packet() + pkt.check_error() + if pkt.is_ok_packet() or last: + break + return pkt + else: + raise err.OperationalError(2059, "Authentication plugin '%s' not configured" % plugin_name) + + self.write_packet(data) + pkt = self._read_packet() + pkt.check_error() + return pkt + + def _get_auth_plugin_handler(self, plugin_name): + plugin_class = self._auth_plugin_map.get(plugin_name) + if not plugin_class and isinstance(plugin_name, bytes): + plugin_class = self._auth_plugin_map.get(plugin_name.decode('ascii')) + if plugin_class: + try: + handler = plugin_class(self) + except TypeError: + raise err.OperationalError(2059, "Authentication plugin '%s'" + " not loaded: - %r cannot be constructed with connection object" % (plugin_name, plugin_class)) + else: + handler = None + return handler + + # _mysql support + def thread_id(self): + return self.server_thread_id[0] + + def character_set_name(self): + return self.charset + + def get_host_info(self): + return self.host_info + + def get_proto_info(self): + return self.protocol_version + + def _get_server_information(self): + i = 0 + packet = self._read_packet() + data = packet.get_all_data() + + self.protocol_version = byte2int(data[i:i+1]) + i += 1 + + server_end = data.find(b'\0', i) + self.server_version = data[i:server_end].decode('latin1') + i = server_end + 1 + + self.server_thread_id = struct.unpack('= i + 6: + lang, stat, cap_h, salt_len = struct.unpack('= i + salt_len: + # salt_len includes auth_plugin_data_part_1 and filler + self.salt += data[i:i+salt_len] + i += salt_len + + i+=1 + # AUTH PLUGIN NAME may appear here. + if self.server_capabilities & CLIENT.PLUGIN_AUTH and len(data) >= i: + # Due to Bug#59453 the auth-plugin-name is missing the terminating + # NUL-char in versions prior to 5.5.10 and 5.6.2. + # ref: https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake + # didn't use version checks as mariadb is corrected and reports + # earlier than those two. 
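The handshake parsing that follows reads the NUL-terminated auth-plugin-name, tolerating a missing terminator (Bug#59453, servers before 5.5.10/5.6.2). A minimal standalone version of that tolerant read:

    def read_cstring(data, start):
        # Return bytes up to the NUL terminator, or everything to the end
        # of the buffer when the terminator is missing.
        end = data.find(b'\0', start)
        if end < 0:
            return data[start:]
        return data[start:end]

    assert read_cstring(b'mysql_native_password\x00', 0) == b'mysql_native_password'
    assert read_cstring(b'caching_sha2_password', 0) == b'caching_sha2_password'
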
+ server_end = data.find(b'\0', i) + if server_end < 0: # pragma: no cover - very specific upstream bug + # not found \0 and last field so take it all + self._auth_plugin_name = data[i:].decode('utf-8') + else: + self._auth_plugin_name = data[i:server_end].decode('utf-8') + + def get_server_info(self): + return self.server_version + + Warning = err.Warning + Error = err.Error + InterfaceError = err.InterfaceError + DatabaseError = err.DatabaseError + DataError = err.DataError + OperationalError = err.OperationalError + IntegrityError = err.IntegrityError + InternalError = err.InternalError + ProgrammingError = err.ProgrammingError + NotSupportedError = err.NotSupportedError + + +class MySQLResult(object): + + def __init__(self, connection): + """ + :type connection: Connection + """ + self.connection = connection + self.affected_rows = None + self.insert_id = None + self.server_status = None + self.warning_count = 0 + self.message = None + self.field_count = 0 + self.description = None + self.rows = None + self.has_next = None + self.unbuffered_active = False + + def __del__(self): + if self.unbuffered_active: + self._finish_unbuffered_query() + + def read(self): + try: + first_packet = self.connection._read_packet() + + if first_packet.is_ok_packet(): + self._read_ok_packet(first_packet) + elif first_packet.is_load_local_packet(): + self._read_load_local_packet(first_packet) + else: + self._read_result_packet(first_packet) + finally: + self.connection = None + + def init_unbuffered_query(self): + """ + :raise OperationalError: If the connection to the MySQL server is lost. + :raise InternalError: + """ + self.unbuffered_active = True + first_packet = self.connection._read_packet() + + if first_packet.is_ok_packet(): + self._read_ok_packet(first_packet) + self.unbuffered_active = False + self.connection = None + elif first_packet.is_load_local_packet(): + self._read_load_local_packet(first_packet) + self.unbuffered_active = False + self.connection = None + else: + self.field_count = first_packet.read_length_encoded_integer() + self._get_descriptions() + + # Apparently, MySQLdb picks this number because it's the maximum + # value of a 64bit unsigned integer. Since we're emulating MySQLdb, + # we set it to this instead of None, which would be preferred. 
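The sentinel assigned just below is simply the largest unsigned 64-bit value:

    # 18446744073709551615 == 2**64 - 1 == 0xFFFFFFFFFFFFFFFF
    assert 18446744073709551615 == 2**64 - 1
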
+ self.affected_rows = 18446744073709551615 + + def _read_ok_packet(self, first_packet): + ok_packet = OKPacketWrapper(first_packet) + self.affected_rows = ok_packet.affected_rows + self.insert_id = ok_packet.insert_id + self.server_status = ok_packet.server_status + self.warning_count = ok_packet.warning_count + self.message = ok_packet.message + self.has_next = ok_packet.has_next + + def _read_load_local_packet(self, first_packet): + if not self.connection._local_infile: + raise RuntimeError( + "**WARN**: Received LOAD_LOCAL packet but local_infile option is false.") + load_packet = LoadLocalPacketWrapper(first_packet) + sender = LoadLocalFile(load_packet.filename, self.connection) + try: + sender.send_data() + except: + self.connection._read_packet() # skip ok packet + raise + + ok_packet = self.connection._read_packet() + if not ok_packet.is_ok_packet(): # pragma: no cover - upstream induced protocol error + raise err.OperationalError(2014, "Commands Out of Sync") + self._read_ok_packet(ok_packet) + + def _check_packet_is_eof(self, packet): + if not packet.is_eof_packet(): + return False + #TODO: Support CLIENT.DEPRECATE_EOF + # 1) Add DEPRECATE_EOF to CAPABILITIES + # 2) Mask CAPABILITIES with server_capabilities + # 3) if server_capabilities & CLIENT.DEPRECATE_EOF: use OKPacketWrapper instead of EOFPacketWrapper + wp = EOFPacketWrapper(packet) + self.warning_count = wp.warning_count + self.has_next = wp.has_next + return True + + def _read_result_packet(self, first_packet): + self.field_count = first_packet.read_length_encoded_integer() + self._get_descriptions() + self._read_rowdata_packet() + + def _read_rowdata_packet_unbuffered(self): + # Check if in an active query + if not self.unbuffered_active: + return + + # EOF + packet = self.connection._read_packet() + if self._check_packet_is_eof(packet): + self.unbuffered_active = False + self.connection = None + self.rows = None + return + + row = self._read_row_from_packet(packet) + self.affected_rows = 1 + self.rows = (row,) # rows should tuple of row for MySQL-python compatibility. + return row + + def _finish_unbuffered_query(self): + # After much reading on the MySQL protocol, it appears that there is, + # in fact, no way to stop MySQL from sending all the data after + # executing a query, so we just spin, and wait for an EOF packet. + while self.unbuffered_active: + packet = self.connection._read_packet() + if self._check_packet_is_eof(packet): + self.unbuffered_active = False + self.connection = None # release reference to kill cyclic reference. + + def _read_rowdata_packet(self): + """Read a rowdata packet for each data row in the result set.""" + rows = [] + while True: + packet = self.connection._read_packet() + if self._check_packet_is_eof(packet): + self.connection = None # release reference to kill cyclic reference. 
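This unbuffered path is what SSCursor builds on: each row is read from the socket on demand instead of being materialized up front. A hedged usage sketch, assuming the package's usual top-level connect() alias; host, credentials, and the table are placeholders:

    import pymysql
    from pymysql.cursors import SSCursor

    conn = pymysql.connect(host='localhost', user='user', password='secret',
                           database='test', cursorclass=SSCursor)
    try:
        cur = conn.cursor()
        cur.execute("SELECT id, name FROM big_table")  # hypothetical table
        for row in cur:  # streams row by row via the unbuffered read path
            print(row)
        cur.close()  # drains any remaining rows
    finally:
        conn.close()
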
+ break + rows.append(self._read_row_from_packet(packet)) + + self.affected_rows = len(rows) + self.rows = tuple(rows) + + def _read_row_from_packet(self, packet): + row = [] + for encoding, converter in self.converters: + try: + data = packet.read_length_coded_string() + except IndexError: + # No more columns in this row + # See https://github.com/PyMySQL/PyMySQL/pull/434 + break + if data is not None: + if encoding is not None: + data = data.decode(encoding) + if DEBUG: print("DEBUG: DATA = ", data) + if converter is not None: + data = converter(data) + row.append(data) + return tuple(row) + + def _get_descriptions(self): + """Read a column descriptor packet for each column in the result.""" + self.fields = [] + self.converters = [] + use_unicode = self.connection.use_unicode + conn_encoding = self.connection.encoding + description = [] + + for i in range_type(self.field_count): + field = self.connection._read_packet(FieldDescriptorPacket) + self.fields.append(field) + description.append(field.description()) + field_type = field.type_code + if use_unicode: + if field_type == FIELD_TYPE.JSON: + # When SELECT from JSON column: charset = binary + # When SELECT CAST(... AS JSON): charset = connection encoding + # This behavior is different from TEXT / BLOB. + # We should decode result by connection encoding regardless charsetnr. + # See https://github.com/PyMySQL/PyMySQL/issues/488 + encoding = conn_encoding # SELECT CAST(... AS JSON) + elif field_type in TEXT_TYPES: + if field.charsetnr == 63: # binary + # TEXTs with charset=binary means BINARY types. + encoding = None + else: + encoding = conn_encoding + else: + # Integers, Dates and Times, and other basic data is encoded in ascii + encoding = 'ascii' + else: + encoding = None + converter = self.connection.decoders.get(field_type) + if converter is converters.through: + converter = None + if DEBUG: print("DEBUG: field={}, converter={}".format(field, converter)) + self.converters.append((encoding, converter)) + + eof_packet = self.connection._read_packet() + assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF' + self.description = tuple(description) + + +class LoadLocalFile(object): + def __init__(self, filename, connection): + self.filename = filename + self.connection = connection + + def send_data(self): + """Send data packets from the local file to the server""" + if not self.connection._sock: + raise err.InterfaceError("(0, '')") + conn = self.connection + + try: + with open(self.filename, 'rb') as open_file: + packet_size = min(conn.max_allowed_packet, 16*1024) # 16KB is efficient enough + while True: + chunk = open_file.read(packet_size) + if not chunk: + break + conn.write_packet(chunk) + except IOError: + raise err.OperationalError(1017, "Can't find file '{0}'".format(self.filename)) + finally: + # send the empty packet to signify we are done sending data + conn.write_packet(b'') diff --git a/server/www/packages/packages-windows/x86/pymysql/constants/CLIENT.py b/server/www/packages/packages-windows/x86/pymysql/constants/CLIENT.py new file mode 100644 index 0000000..b42f152 --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/constants/CLIENT.py @@ -0,0 +1,31 @@ +# https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags +LONG_PASSWORD = 1 +FOUND_ROWS = 1 << 1 +LONG_FLAG = 1 << 2 +CONNECT_WITH_DB = 1 << 3 +NO_SCHEMA = 1 << 4 +COMPRESS = 1 << 5 +ODBC = 1 << 6 +LOCAL_FILES = 1 << 7 +IGNORE_SPACE = 1 << 8 +PROTOCOL_41 = 1 << 9 +INTERACTIVE = 1 << 10 +SSL = 1 << 11 
+IGNORE_SIGPIPE = 1 << 12 +TRANSACTIONS = 1 << 13 +SECURE_CONNECTION = 1 << 15 +MULTI_STATEMENTS = 1 << 16 +MULTI_RESULTS = 1 << 17 +PS_MULTI_RESULTS = 1 << 18 +PLUGIN_AUTH = 1 << 19 +CONNECT_ATTRS = 1 << 20 +PLUGIN_AUTH_LENENC_CLIENT_DATA = 1 << 21 +CAPABILITIES = ( + LONG_PASSWORD | LONG_FLAG | PROTOCOL_41 | TRANSACTIONS + | SECURE_CONNECTION | MULTI_RESULTS + | PLUGIN_AUTH | PLUGIN_AUTH_LENENC_CLIENT_DATA | CONNECT_ATTRS) + +# Not done yet +HANDLE_EXPIRED_PASSWORDS = 1 << 22 +SESSION_TRACK = 1 << 23 +DEPRECATE_EOF = 1 << 24 diff --git a/server/www/packages/packages-windows/x86/pymysql/constants/COMMAND.py b/server/www/packages/packages-windows/x86/pymysql/constants/COMMAND.py new file mode 100644 index 0000000..1da2755 --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/constants/COMMAND.py @@ -0,0 +1,33 @@ + +COM_SLEEP = 0x00 +COM_QUIT = 0x01 +COM_INIT_DB = 0x02 +COM_QUERY = 0x03 +COM_FIELD_LIST = 0x04 +COM_CREATE_DB = 0x05 +COM_DROP_DB = 0x06 +COM_REFRESH = 0x07 +COM_SHUTDOWN = 0x08 +COM_STATISTICS = 0x09 +COM_PROCESS_INFO = 0x0a +COM_CONNECT = 0x0b +COM_PROCESS_KILL = 0x0c +COM_DEBUG = 0x0d +COM_PING = 0x0e +COM_TIME = 0x0f +COM_DELAYED_INSERT = 0x10 +COM_CHANGE_USER = 0x11 +COM_BINLOG_DUMP = 0x12 +COM_TABLE_DUMP = 0x13 +COM_CONNECT_OUT = 0x14 +COM_REGISTER_SLAVE = 0x15 +COM_STMT_PREPARE = 0x16 +COM_STMT_EXECUTE = 0x17 +COM_STMT_SEND_LONG_DATA = 0x18 +COM_STMT_CLOSE = 0x19 +COM_STMT_RESET = 0x1a +COM_SET_OPTION = 0x1b +COM_STMT_FETCH = 0x1c +COM_DAEMON = 0x1d +COM_BINLOG_DUMP_GTID = 0x1e +COM_END = 0x1f diff --git a/server/www/packages/packages-windows/x86/pymysql/constants/CR.py b/server/www/packages/packages-windows/x86/pymysql/constants/CR.py new file mode 100644 index 0000000..48ca956 --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/constants/CR.py @@ -0,0 +1,68 @@ +# flake8: noqa +# errmsg.h +CR_ERROR_FIRST = 2000 +CR_UNKNOWN_ERROR = 2000 +CR_SOCKET_CREATE_ERROR = 2001 +CR_CONNECTION_ERROR = 2002 +CR_CONN_HOST_ERROR = 2003 +CR_IPSOCK_ERROR = 2004 +CR_UNKNOWN_HOST = 2005 +CR_SERVER_GONE_ERROR = 2006 +CR_VERSION_ERROR = 2007 +CR_OUT_OF_MEMORY = 2008 +CR_WRONG_HOST_INFO = 2009 +CR_LOCALHOST_CONNECTION = 2010 +CR_TCP_CONNECTION = 2011 +CR_SERVER_HANDSHAKE_ERR = 2012 +CR_SERVER_LOST = 2013 +CR_COMMANDS_OUT_OF_SYNC = 2014 +CR_NAMEDPIPE_CONNECTION = 2015 +CR_NAMEDPIPEWAIT_ERROR = 2016 +CR_NAMEDPIPEOPEN_ERROR = 2017 +CR_NAMEDPIPESETSTATE_ERROR = 2018 +CR_CANT_READ_CHARSET = 2019 +CR_NET_PACKET_TOO_LARGE = 2020 +CR_EMBEDDED_CONNECTION = 2021 +CR_PROBE_SLAVE_STATUS = 2022 +CR_PROBE_SLAVE_HOSTS = 2023 +CR_PROBE_SLAVE_CONNECT = 2024 +CR_PROBE_MASTER_CONNECT = 2025 +CR_SSL_CONNECTION_ERROR = 2026 +CR_MALFORMED_PACKET = 2027 +CR_WRONG_LICENSE = 2028 + +CR_NULL_POINTER = 2029 +CR_NO_PREPARE_STMT = 2030 +CR_PARAMS_NOT_BOUND = 2031 +CR_DATA_TRUNCATED = 2032 +CR_NO_PARAMETERS_EXISTS = 2033 +CR_INVALID_PARAMETER_NO = 2034 +CR_INVALID_BUFFER_USE = 2035 +CR_UNSUPPORTED_PARAM_TYPE = 2036 + +CR_SHARED_MEMORY_CONNECTION = 2037 +CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = 2038 +CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = 2039 +CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = 2040 +CR_SHARED_MEMORY_CONNECT_MAP_ERROR = 2041 +CR_SHARED_MEMORY_FILE_MAP_ERROR = 2042 +CR_SHARED_MEMORY_MAP_ERROR = 2043 +CR_SHARED_MEMORY_EVENT_ERROR = 2044 +CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = 2045 +CR_SHARED_MEMORY_CONNECT_SET_ERROR = 2046 +CR_CONN_UNKNOW_PROTOCOL = 2047 +CR_INVALID_CONN_HANDLE = 2048 +CR_SECURE_AUTH = 2049 +CR_FETCH_CANCELED = 2050 +CR_NO_DATA = 2051 +CR_NO_STMT_METADATA = 2052 
+CR_NO_RESULT_SET = 2053 +CR_NOT_IMPLEMENTED = 2054 +CR_SERVER_LOST_EXTENDED = 2055 +CR_STMT_CLOSED = 2056 +CR_NEW_STMT_METADATA = 2057 +CR_ALREADY_CONNECTED = 2058 +CR_AUTH_PLUGIN_CANNOT_LOAD = 2059 +CR_DUPLICATE_CONNECTION_ATTR = 2060 +CR_AUTH_PLUGIN_ERR = 2061 +CR_ERROR_LAST = 2061 diff --git a/server/www/packages/packages-windows/x86/pymysql/constants/ER.py b/server/www/packages/packages-windows/x86/pymysql/constants/ER.py new file mode 100644 index 0000000..79b88af --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/constants/ER.py @@ -0,0 +1,475 @@ + +ERROR_FIRST = 1000 +HASHCHK = 1000 +NISAMCHK = 1001 +NO = 1002 +YES = 1003 +CANT_CREATE_FILE = 1004 +CANT_CREATE_TABLE = 1005 +CANT_CREATE_DB = 1006 +DB_CREATE_EXISTS = 1007 +DB_DROP_EXISTS = 1008 +DB_DROP_DELETE = 1009 +DB_DROP_RMDIR = 1010 +CANT_DELETE_FILE = 1011 +CANT_FIND_SYSTEM_REC = 1012 +CANT_GET_STAT = 1013 +CANT_GET_WD = 1014 +CANT_LOCK = 1015 +CANT_OPEN_FILE = 1016 +FILE_NOT_FOUND = 1017 +CANT_READ_DIR = 1018 +CANT_SET_WD = 1019 +CHECKREAD = 1020 +DISK_FULL = 1021 +DUP_KEY = 1022 +ERROR_ON_CLOSE = 1023 +ERROR_ON_READ = 1024 +ERROR_ON_RENAME = 1025 +ERROR_ON_WRITE = 1026 +FILE_USED = 1027 +FILSORT_ABORT = 1028 +FORM_NOT_FOUND = 1029 +GET_ERRNO = 1030 +ILLEGAL_HA = 1031 +KEY_NOT_FOUND = 1032 +NOT_FORM_FILE = 1033 +NOT_KEYFILE = 1034 +OLD_KEYFILE = 1035 +OPEN_AS_READONLY = 1036 +OUTOFMEMORY = 1037 +OUT_OF_SORTMEMORY = 1038 +UNEXPECTED_EOF = 1039 +CON_COUNT_ERROR = 1040 +OUT_OF_RESOURCES = 1041 +BAD_HOST_ERROR = 1042 +HANDSHAKE_ERROR = 1043 +DBACCESS_DENIED_ERROR = 1044 +ACCESS_DENIED_ERROR = 1045 +NO_DB_ERROR = 1046 +UNKNOWN_COM_ERROR = 1047 +BAD_NULL_ERROR = 1048 +BAD_DB_ERROR = 1049 +TABLE_EXISTS_ERROR = 1050 +BAD_TABLE_ERROR = 1051 +NON_UNIQ_ERROR = 1052 +SERVER_SHUTDOWN = 1053 +BAD_FIELD_ERROR = 1054 +WRONG_FIELD_WITH_GROUP = 1055 +WRONG_GROUP_FIELD = 1056 +WRONG_SUM_SELECT = 1057 +WRONG_VALUE_COUNT = 1058 +TOO_LONG_IDENT = 1059 +DUP_FIELDNAME = 1060 +DUP_KEYNAME = 1061 +DUP_ENTRY = 1062 +WRONG_FIELD_SPEC = 1063 +PARSE_ERROR = 1064 +EMPTY_QUERY = 1065 +NONUNIQ_TABLE = 1066 +INVALID_DEFAULT = 1067 +MULTIPLE_PRI_KEY = 1068 +TOO_MANY_KEYS = 1069 +TOO_MANY_KEY_PARTS = 1070 +TOO_LONG_KEY = 1071 +KEY_COLUMN_DOES_NOT_EXITS = 1072 +BLOB_USED_AS_KEY = 1073 +TOO_BIG_FIELDLENGTH = 1074 +WRONG_AUTO_KEY = 1075 +READY = 1076 +NORMAL_SHUTDOWN = 1077 +GOT_SIGNAL = 1078 +SHUTDOWN_COMPLETE = 1079 +FORCING_CLOSE = 1080 +IPSOCK_ERROR = 1081 +NO_SUCH_INDEX = 1082 +WRONG_FIELD_TERMINATORS = 1083 +BLOBS_AND_NO_TERMINATED = 1084 +TEXTFILE_NOT_READABLE = 1085 +FILE_EXISTS_ERROR = 1086 +LOAD_INFO = 1087 +ALTER_INFO = 1088 +WRONG_SUB_KEY = 1089 +CANT_REMOVE_ALL_FIELDS = 1090 +CANT_DROP_FIELD_OR_KEY = 1091 +INSERT_INFO = 1092 +UPDATE_TABLE_USED = 1093 +NO_SUCH_THREAD = 1094 +KILL_DENIED_ERROR = 1095 +NO_TABLES_USED = 1096 +TOO_BIG_SET = 1097 +NO_UNIQUE_LOGFILE = 1098 +TABLE_NOT_LOCKED_FOR_WRITE = 1099 +TABLE_NOT_LOCKED = 1100 +BLOB_CANT_HAVE_DEFAULT = 1101 +WRONG_DB_NAME = 1102 +WRONG_TABLE_NAME = 1103 +TOO_BIG_SELECT = 1104 +UNKNOWN_ERROR = 1105 +UNKNOWN_PROCEDURE = 1106 +WRONG_PARAMCOUNT_TO_PROCEDURE = 1107 +WRONG_PARAMETERS_TO_PROCEDURE = 1108 +UNKNOWN_TABLE = 1109 +FIELD_SPECIFIED_TWICE = 1110 +INVALID_GROUP_FUNC_USE = 1111 +UNSUPPORTED_EXTENSION = 1112 +TABLE_MUST_HAVE_COLUMNS = 1113 +RECORD_FILE_FULL = 1114 +UNKNOWN_CHARACTER_SET = 1115 +TOO_MANY_TABLES = 1116 +TOO_MANY_FIELDS = 1117 +TOO_BIG_ROWSIZE = 1118 +STACK_OVERRUN = 1119 +WRONG_OUTER_JOIN = 1120 +NULL_COLUMN_IN_INDEX = 1121 +CANT_FIND_UDF = 1122 +CANT_INITIALIZE_UDF = 
1123 +UDF_NO_PATHS = 1124 +UDF_EXISTS = 1125 +CANT_OPEN_LIBRARY = 1126 +CANT_FIND_DL_ENTRY = 1127 +FUNCTION_NOT_DEFINED = 1128 +HOST_IS_BLOCKED = 1129 +HOST_NOT_PRIVILEGED = 1130 +PASSWORD_ANONYMOUS_USER = 1131 +PASSWORD_NOT_ALLOWED = 1132 +PASSWORD_NO_MATCH = 1133 +UPDATE_INFO = 1134 +CANT_CREATE_THREAD = 1135 +WRONG_VALUE_COUNT_ON_ROW = 1136 +CANT_REOPEN_TABLE = 1137 +INVALID_USE_OF_NULL = 1138 +REGEXP_ERROR = 1139 +MIX_OF_GROUP_FUNC_AND_FIELDS = 1140 +NONEXISTING_GRANT = 1141 +TABLEACCESS_DENIED_ERROR = 1142 +COLUMNACCESS_DENIED_ERROR = 1143 +ILLEGAL_GRANT_FOR_TABLE = 1144 +GRANT_WRONG_HOST_OR_USER = 1145 +NO_SUCH_TABLE = 1146 +NONEXISTING_TABLE_GRANT = 1147 +NOT_ALLOWED_COMMAND = 1148 +SYNTAX_ERROR = 1149 +DELAYED_CANT_CHANGE_LOCK = 1150 +TOO_MANY_DELAYED_THREADS = 1151 +ABORTING_CONNECTION = 1152 +NET_PACKET_TOO_LARGE = 1153 +NET_READ_ERROR_FROM_PIPE = 1154 +NET_FCNTL_ERROR = 1155 +NET_PACKETS_OUT_OF_ORDER = 1156 +NET_UNCOMPRESS_ERROR = 1157 +NET_READ_ERROR = 1158 +NET_READ_INTERRUPTED = 1159 +NET_ERROR_ON_WRITE = 1160 +NET_WRITE_INTERRUPTED = 1161 +TOO_LONG_STRING = 1162 +TABLE_CANT_HANDLE_BLOB = 1163 +TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164 +DELAYED_INSERT_TABLE_LOCKED = 1165 +WRONG_COLUMN_NAME = 1166 +WRONG_KEY_COLUMN = 1167 +WRONG_MRG_TABLE = 1168 +DUP_UNIQUE = 1169 +BLOB_KEY_WITHOUT_LENGTH = 1170 +PRIMARY_CANT_HAVE_NULL = 1171 +TOO_MANY_ROWS = 1172 +REQUIRES_PRIMARY_KEY = 1173 +NO_RAID_COMPILED = 1174 +UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175 +KEY_DOES_NOT_EXITS = 1176 +CHECK_NO_SUCH_TABLE = 1177 +CHECK_NOT_IMPLEMENTED = 1178 +CANT_DO_THIS_DURING_AN_TRANSACTION = 1179 +ERROR_DURING_COMMIT = 1180 +ERROR_DURING_ROLLBACK = 1181 +ERROR_DURING_FLUSH_LOGS = 1182 +ERROR_DURING_CHECKPOINT = 1183 +NEW_ABORTING_CONNECTION = 1184 +DUMP_NOT_IMPLEMENTED = 1185 +FLUSH_MASTER_BINLOG_CLOSED = 1186 +INDEX_REBUILD = 1187 +MASTER = 1188 +MASTER_NET_READ = 1189 +MASTER_NET_WRITE = 1190 +FT_MATCHING_KEY_NOT_FOUND = 1191 +LOCK_OR_ACTIVE_TRANSACTION = 1192 +UNKNOWN_SYSTEM_VARIABLE = 1193 +CRASHED_ON_USAGE = 1194 +CRASHED_ON_REPAIR = 1195 +WARNING_NOT_COMPLETE_ROLLBACK = 1196 +TRANS_CACHE_FULL = 1197 +SLAVE_MUST_STOP = 1198 +SLAVE_NOT_RUNNING = 1199 +BAD_SLAVE = 1200 +MASTER_INFO = 1201 +SLAVE_THREAD = 1202 +TOO_MANY_USER_CONNECTIONS = 1203 +SET_CONSTANTS_ONLY = 1204 +LOCK_WAIT_TIMEOUT = 1205 +LOCK_TABLE_FULL = 1206 +READ_ONLY_TRANSACTION = 1207 +DROP_DB_WITH_READ_LOCK = 1208 +CREATE_DB_WITH_READ_LOCK = 1209 +WRONG_ARGUMENTS = 1210 +NO_PERMISSION_TO_CREATE_USER = 1211 +UNION_TABLES_IN_DIFFERENT_DIR = 1212 +LOCK_DEADLOCK = 1213 +TABLE_CANT_HANDLE_FT = 1214 +CANNOT_ADD_FOREIGN = 1215 +NO_REFERENCED_ROW = 1216 +ROW_IS_REFERENCED = 1217 +CONNECT_TO_MASTER = 1218 +QUERY_ON_MASTER = 1219 +ERROR_WHEN_EXECUTING_COMMAND = 1220 +WRONG_USAGE = 1221 +WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222 +CANT_UPDATE_WITH_READLOCK = 1223 +MIXING_NOT_ALLOWED = 1224 +DUP_ARGUMENT = 1225 +USER_LIMIT_REACHED = 1226 +SPECIFIC_ACCESS_DENIED_ERROR = 1227 +LOCAL_VARIABLE = 1228 +GLOBAL_VARIABLE = 1229 +NO_DEFAULT = 1230 +WRONG_VALUE_FOR_VAR = 1231 +WRONG_TYPE_FOR_VAR = 1232 +VAR_CANT_BE_READ = 1233 +CANT_USE_OPTION_HERE = 1234 +NOT_SUPPORTED_YET = 1235 +MASTER_FATAL_ERROR_READING_BINLOG = 1236 +SLAVE_IGNORED_TABLE = 1237 +INCORRECT_GLOBAL_LOCAL_VAR = 1238 +WRONG_FK_DEF = 1239 +KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240 +OPERAND_COLUMNS = 1241 +SUBQUERY_NO_1_ROW = 1242 +UNKNOWN_STMT_HANDLER = 1243 +CORRUPT_HELP_DB = 1244 +CYCLIC_REFERENCE = 1245 +AUTO_CONVERT = 1246 +ILLEGAL_REFERENCE = 1247 +DERIVED_MUST_HAVE_ALIAS = 1248 +SELECT_REDUCED = 
1249 +TABLENAME_NOT_ALLOWED_HERE = 1250 +NOT_SUPPORTED_AUTH_MODE = 1251 +SPATIAL_CANT_HAVE_NULL = 1252 +COLLATION_CHARSET_MISMATCH = 1253 +SLAVE_WAS_RUNNING = 1254 +SLAVE_WAS_NOT_RUNNING = 1255 +TOO_BIG_FOR_UNCOMPRESS = 1256 +ZLIB_Z_MEM_ERROR = 1257 +ZLIB_Z_BUF_ERROR = 1258 +ZLIB_Z_DATA_ERROR = 1259 +CUT_VALUE_GROUP_CONCAT = 1260 +WARN_TOO_FEW_RECORDS = 1261 +WARN_TOO_MANY_RECORDS = 1262 +WARN_NULL_TO_NOTNULL = 1263 +WARN_DATA_OUT_OF_RANGE = 1264 +WARN_DATA_TRUNCATED = 1265 +WARN_USING_OTHER_HANDLER = 1266 +CANT_AGGREGATE_2COLLATIONS = 1267 +DROP_USER = 1268 +REVOKE_GRANTS = 1269 +CANT_AGGREGATE_3COLLATIONS = 1270 +CANT_AGGREGATE_NCOLLATIONS = 1271 +VARIABLE_IS_NOT_STRUCT = 1272 +UNKNOWN_COLLATION = 1273 +SLAVE_IGNORED_SSL_PARAMS = 1274 +SERVER_IS_IN_SECURE_AUTH_MODE = 1275 +WARN_FIELD_RESOLVED = 1276 +BAD_SLAVE_UNTIL_COND = 1277 +MISSING_SKIP_SLAVE = 1278 +UNTIL_COND_IGNORED = 1279 +WRONG_NAME_FOR_INDEX = 1280 +WRONG_NAME_FOR_CATALOG = 1281 +WARN_QC_RESIZE = 1282 +BAD_FT_COLUMN = 1283 +UNKNOWN_KEY_CACHE = 1284 +WARN_HOSTNAME_WONT_WORK = 1285 +UNKNOWN_STORAGE_ENGINE = 1286 +WARN_DEPRECATED_SYNTAX = 1287 +NON_UPDATABLE_TABLE = 1288 +FEATURE_DISABLED = 1289 +OPTION_PREVENTS_STATEMENT = 1290 +DUPLICATED_VALUE_IN_TYPE = 1291 +TRUNCATED_WRONG_VALUE = 1292 +TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293 +INVALID_ON_UPDATE = 1294 +UNSUPPORTED_PS = 1295 +GET_ERRMSG = 1296 +GET_TEMPORARY_ERRMSG = 1297 +UNKNOWN_TIME_ZONE = 1298 +WARN_INVALID_TIMESTAMP = 1299 +INVALID_CHARACTER_STRING = 1300 +WARN_ALLOWED_PACKET_OVERFLOWED = 1301 +CONFLICTING_DECLARATIONS = 1302 +SP_NO_RECURSIVE_CREATE = 1303 +SP_ALREADY_EXISTS = 1304 +SP_DOES_NOT_EXIST = 1305 +SP_DROP_FAILED = 1306 +SP_STORE_FAILED = 1307 +SP_LILABEL_MISMATCH = 1308 +SP_LABEL_REDEFINE = 1309 +SP_LABEL_MISMATCH = 1310 +SP_UNINIT_VAR = 1311 +SP_BADSELECT = 1312 +SP_BADRETURN = 1313 +SP_BADSTATEMENT = 1314 +UPDATE_LOG_DEPRECATED_IGNORED = 1315 +UPDATE_LOG_DEPRECATED_TRANSLATED = 1316 +QUERY_INTERRUPTED = 1317 +SP_WRONG_NO_OF_ARGS = 1318 +SP_COND_MISMATCH = 1319 +SP_NORETURN = 1320 +SP_NORETURNEND = 1321 +SP_BAD_CURSOR_QUERY = 1322 +SP_BAD_CURSOR_SELECT = 1323 +SP_CURSOR_MISMATCH = 1324 +SP_CURSOR_ALREADY_OPEN = 1325 +SP_CURSOR_NOT_OPEN = 1326 +SP_UNDECLARED_VAR = 1327 +SP_WRONG_NO_OF_FETCH_ARGS = 1328 +SP_FETCH_NO_DATA = 1329 +SP_DUP_PARAM = 1330 +SP_DUP_VAR = 1331 +SP_DUP_COND = 1332 +SP_DUP_CURS = 1333 +SP_CANT_ALTER = 1334 +SP_SUBSELECT_NYI = 1335 +STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336 +SP_VARCOND_AFTER_CURSHNDLR = 1337 +SP_CURSOR_AFTER_HANDLER = 1338 +SP_CASE_NOT_FOUND = 1339 +FPARSER_TOO_BIG_FILE = 1340 +FPARSER_BAD_HEADER = 1341 +FPARSER_EOF_IN_COMMENT = 1342 +FPARSER_ERROR_IN_PARAMETER = 1343 +FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344 +VIEW_NO_EXPLAIN = 1345 +FRM_UNKNOWN_TYPE = 1346 +WRONG_OBJECT = 1347 +NONUPDATEABLE_COLUMN = 1348 +VIEW_SELECT_DERIVED = 1349 +VIEW_SELECT_CLAUSE = 1350 +VIEW_SELECT_VARIABLE = 1351 +VIEW_SELECT_TMPTABLE = 1352 +VIEW_WRONG_LIST = 1353 +WARN_VIEW_MERGE = 1354 +WARN_VIEW_WITHOUT_KEY = 1355 +VIEW_INVALID = 1356 +SP_NO_DROP_SP = 1357 +SP_GOTO_IN_HNDLR = 1358 +TRG_ALREADY_EXISTS = 1359 +TRG_DOES_NOT_EXIST = 1360 +TRG_ON_VIEW_OR_TEMP_TABLE = 1361 +TRG_CANT_CHANGE_ROW = 1362 +TRG_NO_SUCH_ROW_IN_TRG = 1363 +NO_DEFAULT_FOR_FIELD = 1364 +DIVISION_BY_ZERO = 1365 +TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366 +ILLEGAL_VALUE_FOR_TYPE = 1367 +VIEW_NONUPD_CHECK = 1368 +VIEW_CHECK_FAILED = 1369 +PROCACCESS_DENIED_ERROR = 1370 +RELAY_LOG_FAIL = 1371 +PASSWD_LENGTH = 1372 +UNKNOWN_TARGET_BINLOG = 1373 +IO_ERR_LOG_INDEX_READ = 1374 
+BINLOG_PURGE_PROHIBITED = 1375 +FSEEK_FAIL = 1376 +BINLOG_PURGE_FATAL_ERR = 1377 +LOG_IN_USE = 1378 +LOG_PURGE_UNKNOWN_ERR = 1379 +RELAY_LOG_INIT = 1380 +NO_BINARY_LOGGING = 1381 +RESERVED_SYNTAX = 1382 +WSAS_FAILED = 1383 +DIFF_GROUPS_PROC = 1384 +NO_GROUP_FOR_PROC = 1385 +ORDER_WITH_PROC = 1386 +LOGGING_PROHIBIT_CHANGING_OF = 1387 +NO_FILE_MAPPING = 1388 +WRONG_MAGIC = 1389 +PS_MANY_PARAM = 1390 +KEY_PART_0 = 1391 +VIEW_CHECKSUM = 1392 +VIEW_MULTIUPDATE = 1393 +VIEW_NO_INSERT_FIELD_LIST = 1394 +VIEW_DELETE_MERGE_VIEW = 1395 +CANNOT_USER = 1396 +XAER_NOTA = 1397 +XAER_INVAL = 1398 +XAER_RMFAIL = 1399 +XAER_OUTSIDE = 1400 +XAER_RMERR = 1401 +XA_RBROLLBACK = 1402 +NONEXISTING_PROC_GRANT = 1403 +PROC_AUTO_GRANT_FAIL = 1404 +PROC_AUTO_REVOKE_FAIL = 1405 +DATA_TOO_LONG = 1406 +SP_BAD_SQLSTATE = 1407 +STARTUP = 1408 +LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409 +CANT_CREATE_USER_WITH_GRANT = 1410 +WRONG_VALUE_FOR_TYPE = 1411 +TABLE_DEF_CHANGED = 1412 +SP_DUP_HANDLER = 1413 +SP_NOT_VAR_ARG = 1414 +SP_NO_RETSET = 1415 +CANT_CREATE_GEOMETRY_OBJECT = 1416 +FAILED_ROUTINE_BREAK_BINLOG = 1417 +BINLOG_UNSAFE_ROUTINE = 1418 +BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419 +EXEC_STMT_WITH_OPEN_CURSOR = 1420 +STMT_HAS_NO_OPEN_CURSOR = 1421 +COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422 +NO_DEFAULT_FOR_VIEW_FIELD = 1423 +SP_NO_RECURSION = 1424 +TOO_BIG_SCALE = 1425 +TOO_BIG_PRECISION = 1426 +M_BIGGER_THAN_D = 1427 +WRONG_LOCK_OF_SYSTEM_TABLE = 1428 +CONNECT_TO_FOREIGN_DATA_SOURCE = 1429 +QUERY_ON_FOREIGN_DATA_SOURCE = 1430 +FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431 +FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432 +FOREIGN_DATA_STRING_INVALID = 1433 +CANT_CREATE_FEDERATED_TABLE = 1434 +TRG_IN_WRONG_SCHEMA = 1435 +STACK_OVERRUN_NEED_MORE = 1436 +TOO_LONG_BODY = 1437 +WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438 +TOO_BIG_DISPLAYWIDTH = 1439 +XAER_DUPID = 1440 +DATETIME_FUNCTION_OVERFLOW = 1441 +CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442 +VIEW_PREVENT_UPDATE = 1443 +PS_NO_RECURSION = 1444 +SP_CANT_SET_AUTOCOMMIT = 1445 +MALFORMED_DEFINER = 1446 +VIEW_FRM_NO_USER = 1447 +VIEW_OTHER_USER = 1448 +NO_SUCH_USER = 1449 +FORBID_SCHEMA_CHANGE = 1450 +ROW_IS_REFERENCED_2 = 1451 +NO_REFERENCED_ROW_2 = 1452 +SP_BAD_VAR_SHADOW = 1453 +TRG_NO_DEFINER = 1454 +OLD_FILE_FORMAT = 1455 +SP_RECURSION_LIMIT = 1456 +SP_PROC_TABLE_CORRUPT = 1457 +SP_WRONG_NAME = 1458 +TABLE_NEEDS_UPGRADE = 1459 +SP_NO_AGGREGATE = 1460 +MAX_PREPARED_STMT_COUNT_REACHED = 1461 +VIEW_RECURSIVE = 1462 +NON_GROUPING_FIELD_USED = 1463 +TABLE_CANT_HANDLE_SPKEYS = 1464 +NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465 +USERNAME = 1466 +HOSTNAME = 1467 +WRONG_STRING_LENGTH = 1468 +ERROR_LAST = 1468 + +# https://github.com/PyMySQL/PyMySQL/issues/607 +CONSTRAINT_FAILED = 4025 diff --git a/server/www/packages/packages-windows/x86/pymysql/constants/FIELD_TYPE.py b/server/www/packages/packages-windows/x86/pymysql/constants/FIELD_TYPE.py new file mode 100644 index 0000000..51bd514 --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/constants/FIELD_TYPE.py @@ -0,0 +1,33 @@ + + +DECIMAL = 0 +TINY = 1 +SHORT = 2 +LONG = 3 +FLOAT = 4 +DOUBLE = 5 +NULL = 6 +TIMESTAMP = 7 +LONGLONG = 8 +INT24 = 9 +DATE = 10 +TIME = 11 +DATETIME = 12 +YEAR = 13 +NEWDATE = 14 +VARCHAR = 15 +BIT = 16 +JSON = 245 +NEWDECIMAL = 246 +ENUM = 247 +SET = 248 +TINY_BLOB = 249 +MEDIUM_BLOB = 250 +LONG_BLOB = 251 +BLOB = 252 +VAR_STRING = 253 +STRING = 254 +GEOMETRY = 255 + +CHAR = TINY +INTERVAL = ENUM diff --git a/server/www/packages/packages-windows/x86/pymysql/constants/FLAG.py 
b/server/www/packages/packages-windows/x86/pymysql/constants/FLAG.py new file mode 100644 index 0000000..f9ebfad --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/constants/FLAG.py @@ -0,0 +1,15 @@ +NOT_NULL = 1 +PRI_KEY = 2 +UNIQUE_KEY = 4 +MULTIPLE_KEY = 8 +BLOB = 16 +UNSIGNED = 32 +ZEROFILL = 64 +BINARY = 128 +ENUM = 256 +AUTO_INCREMENT = 512 +TIMESTAMP = 1024 +SET = 2048 +PART_KEY = 16384 +GROUP = 32767 +UNIQUE = 65536 diff --git a/server/www/packages/packages-windows/x86/pymysql/constants/SERVER_STATUS.py b/server/www/packages/packages-windows/x86/pymysql/constants/SERVER_STATUS.py new file mode 100644 index 0000000..6f5d566 --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/constants/SERVER_STATUS.py @@ -0,0 +1,11 @@ + +SERVER_STATUS_IN_TRANS = 1 +SERVER_STATUS_AUTOCOMMIT = 2 +SERVER_MORE_RESULTS_EXISTS = 8 +SERVER_QUERY_NO_GOOD_INDEX_USED = 16 +SERVER_QUERY_NO_INDEX_USED = 32 +SERVER_STATUS_CURSOR_EXISTS = 64 +SERVER_STATUS_LAST_ROW_SENT = 128 +SERVER_STATUS_DB_DROPPED = 256 +SERVER_STATUS_NO_BACKSLASH_ESCAPES = 512 +SERVER_STATUS_METADATA_CHANGED = 1024 diff --git a/server/www/packages/packages-windows/x86/pymysql/constants/__init__.py b/server/www/packages/packages-windows/x86/pymysql/constants/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/server/www/packages/packages-windows/x86/pymysql/converters.py b/server/www/packages/packages-windows/x86/pymysql/converters.py new file mode 100644 index 0000000..bf1db9d --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/converters.py @@ -0,0 +1,426 @@ +from ._compat import PY2, text_type, long_type, JYTHON, IRONPYTHON, unichr + +import datetime +from decimal import Decimal +import re +import time + +from .constants import FIELD_TYPE, FLAG +from .charset import charset_by_id, charset_to_encoding + + +def escape_item(val, charset, mapping=None): + if mapping is None: + mapping = encoders + encoder = mapping.get(type(val)) + + # Fallback to default when no encoder found + if not encoder: + try: + encoder = mapping[text_type] + except KeyError: + raise TypeError("no default type converter defined") + + if encoder in (escape_dict, escape_sequence): + val = encoder(val, charset, mapping) + else: + val = encoder(val, mapping) + return val + +def escape_dict(val, charset, mapping=None): + n = {} + for k, v in val.items(): + quoted = escape_item(v, charset, mapping) + n[k] = quoted + return n + +def escape_sequence(val, charset, mapping=None): + n = [] + for item in val: + quoted = escape_item(item, charset, mapping) + n.append(quoted) + return "(" + ",".join(n) + ")" + +def escape_set(val, charset, mapping=None): + return ','.join([escape_item(x, charset, mapping) for x in val]) + +def escape_bool(value, mapping=None): + return str(int(value)) + +def escape_object(value, mapping=None): + return str(value) + +def escape_int(value, mapping=None): + return str(value) + +def escape_float(value, mapping=None): + return ('%.15g' % value) + +_escape_table = [unichr(x) for x in range(128)] +_escape_table[0] = u'\\0' +_escape_table[ord('\\')] = u'\\\\' +_escape_table[ord('\n')] = u'\\n' +_escape_table[ord('\r')] = u'\\r' +_escape_table[ord('\032')] = u'\\Z' +_escape_table[ord('"')] = u'\\"' +_escape_table[ord("'")] = u"\\'" + +def _escape_unicode(value, mapping=None): + """escapes *value* without adding quote. 
+ + Value should be unicode + """ + return value.translate(_escape_table) + +if PY2: + def escape_string(value, mapping=None): + """escape_string escapes *value* but not surround it with quotes. + + Value should be bytes or unicode. + """ + if isinstance(value, unicode): + return _escape_unicode(value) + assert isinstance(value, (bytes, bytearray)) + value = value.replace('\\', '\\\\') + value = value.replace('\0', '\\0') + value = value.replace('\n', '\\n') + value = value.replace('\r', '\\r') + value = value.replace('\032', '\\Z') + value = value.replace("'", "\\'") + value = value.replace('"', '\\"') + return value + + def escape_bytes_prefixed(value, mapping=None): + assert isinstance(value, (bytes, bytearray)) + return b"_binary'%s'" % escape_string(value) + + def escape_bytes(value, mapping=None): + assert isinstance(value, (bytes, bytearray)) + return b"'%s'" % escape_string(value) + +else: + escape_string = _escape_unicode + + # On Python ~3.5, str.decode('ascii', 'surrogateescape') is slow. + # (fixed in Python 3.6, http://bugs.python.org/issue24870) + # Workaround is str.decode('latin1') then translate 0x80-0xff into 0udc80-0udcff. + # We can escape special chars and surrogateescape at once. + _escape_bytes_table = _escape_table + [chr(i) for i in range(0xdc80, 0xdd00)] + + def escape_bytes_prefixed(value, mapping=None): + return "_binary'%s'" % value.decode('latin1').translate(_escape_bytes_table) + + def escape_bytes(value, mapping=None): + return "'%s'" % value.decode('latin1').translate(_escape_bytes_table) + + +def escape_unicode(value, mapping=None): + return u"'%s'" % _escape_unicode(value) + +def escape_str(value, mapping=None): + return "'%s'" % escape_string(str(value), mapping) + +def escape_None(value, mapping=None): + return 'NULL' + +def escape_timedelta(obj, mapping=None): + seconds = int(obj.seconds) % 60 + minutes = int(obj.seconds // 60) % 60 + hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24 + if obj.microseconds: + fmt = "'{0:02d}:{1:02d}:{2:02d}.{3:06d}'" + else: + fmt = "'{0:02d}:{1:02d}:{2:02d}'" + return fmt.format(hours, minutes, seconds, obj.microseconds) + +def escape_time(obj, mapping=None): + if obj.microsecond: + fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'" + else: + fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}'" + return fmt.format(obj) + +def escape_datetime(obj, mapping=None): + if obj.microsecond: + fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'" + else: + fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}'" + return fmt.format(obj) + +def escape_date(obj, mapping=None): + fmt = "'{0.year:04}-{0.month:02}-{0.day:02}'" + return fmt.format(obj) + +def escape_struct_time(obj, mapping=None): + return escape_datetime(datetime.datetime(*obj[:6])) + +def _convert_second_fraction(s): + if not s: + return 0 + # Pad zeros to ensure the fraction length in microseconds + s = s.ljust(6, '0') + return int(s[:6]) + +DATETIME_RE = re.compile(r"(\d{1,4})-(\d{1,2})-(\d{1,2})[T ](\d{1,2}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?") + + +def convert_datetime(obj): + """Returns a DATETIME or TIMESTAMP column value as a datetime object: + + >>> datetime_or_None('2007-02-25 23:06:20') + datetime.datetime(2007, 2, 25, 23, 6, 20) + >>> datetime_or_None('2007-02-25T23:06:20') + datetime.datetime(2007, 2, 25, 23, 6, 20) + + Illegal values are returned as None: + + >>> datetime_or_None('2007-02-31T23:06:20') is None + True + >>> 
datetime_or_None('0000-00-00 00:00:00') is None + True + + """ + if not PY2 and isinstance(obj, (bytes, bytearray)): + obj = obj.decode('ascii') + + m = DATETIME_RE.match(obj) + if not m: + return convert_date(obj) + + try: + groups = list(m.groups()) + groups[-1] = _convert_second_fraction(groups[-1]) + return datetime.datetime(*[ int(x) for x in groups ]) + except ValueError: + return convert_date(obj) + +TIMEDELTA_RE = re.compile(r"(-)?(\d{1,3}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?") + + +def convert_timedelta(obj): + """Returns a TIME column as a timedelta object: + + >>> timedelta_or_None('25:06:17') + datetime.timedelta(1, 3977) + >>> timedelta_or_None('-25:06:17') + datetime.timedelta(-2, 83177) + + Illegal values are returned as None: + + >>> timedelta_or_None('random crap') is None + True + + Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but + can accept values as (+|-)DD HH:MM:SS. The latter format will not + be parsed correctly by this function. + """ + if not PY2 and isinstance(obj, (bytes, bytearray)): + obj = obj.decode('ascii') + + m = TIMEDELTA_RE.match(obj) + if not m: + return obj + + try: + groups = list(m.groups()) + groups[-1] = _convert_second_fraction(groups[-1]) + negate = -1 if groups[0] else 1 + hours, minutes, seconds, microseconds = groups[1:] + + tdelta = datetime.timedelta( + hours = int(hours), + minutes = int(minutes), + seconds = int(seconds), + microseconds = int(microseconds) + ) * negate + return tdelta + except ValueError: + return obj + +TIME_RE = re.compile(r"(\d{1,2}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?") + + +def convert_time(obj): + """Returns a TIME column as a time object: + + >>> time_or_None('15:06:17') + datetime.time(15, 6, 17) + + Illegal values are returned as None: + + >>> time_or_None('-25:06:17') is None + True + >>> time_or_None('random crap') is None + True + + Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but + can accept values as (+|-)DD HH:MM:SS. The latter format will not + be parsed correctly by this function. + + Also note that MySQL's TIME column corresponds more closely to + Python's timedelta and not time. However if you want TIME columns + to be treated as time-of-day and not a time offset, then you can + use set this function as the converter for FIELD_TYPE.TIME. + """ + if not PY2 and isinstance(obj, (bytes, bytearray)): + obj = obj.decode('ascii') + + m = TIME_RE.match(obj) + if not m: + return obj + + try: + groups = list(m.groups()) + groups[-1] = _convert_second_fraction(groups[-1]) + hours, minutes, seconds, microseconds = groups + return datetime.time(hour=int(hours), minute=int(minutes), + second=int(seconds), microsecond=int(microseconds)) + except ValueError: + return obj + + +def convert_date(obj): + """Returns a DATE column as a date object: + + >>> date_or_None('2007-02-26') + datetime.date(2007, 2, 26) + + Illegal values are returned as None: + + >>> date_or_None('2007-02-31') is None + True + >>> date_or_None('0000-00-00') is None + True + + """ + if not PY2 and isinstance(obj, (bytes, bytearray)): + obj = obj.decode('ascii') + try: + return datetime.date(*[ int(x) for x in obj.split('-', 2) ]) + except ValueError: + return obj + + +def convert_mysql_timestamp(timestamp): + """Convert a MySQL TIMESTAMP to a Timestamp object. 
+ + MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME: + + >>> mysql_timestamp_converter('2007-02-25 22:32:17') + datetime.datetime(2007, 2, 25, 22, 32, 17) + + MySQL < 4.1 uses a big string of numbers: + + >>> mysql_timestamp_converter('20070225223217') + datetime.datetime(2007, 2, 25, 22, 32, 17) + + Illegal values are returned as None: + + >>> mysql_timestamp_converter('2007-02-31 22:32:17') is None + True + >>> mysql_timestamp_converter('00000000000000') is None + True + + """ + if not PY2 and isinstance(timestamp, (bytes, bytearray)): + timestamp = timestamp.decode('ascii') + if timestamp[4] == '-': + return convert_datetime(timestamp) + timestamp += "0"*(14-len(timestamp)) # padding + year, month, day, hour, minute, second = \ + int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \ + int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14]) + try: + return datetime.datetime(year, month, day, hour, minute, second) + except ValueError: + return timestamp + +def convert_set(s): + if isinstance(s, (bytes, bytearray)): + return set(s.split(b",")) + return set(s.split(",")) + + +def through(x): + return x + + +#def convert_bit(b): +# b = "\x00" * (8 - len(b)) + b # pad w/ zeroes +# return struct.unpack(">Q", b)[0] +# +# the snippet above is right, but MySQLdb doesn't process bits, +# so we shouldn't either +convert_bit = through + + +def convert_characters(connection, field, data): + field_charset = charset_by_id(field.charsetnr).name + encoding = charset_to_encoding(field_charset) + if field.flags & FLAG.SET: + return convert_set(data.decode(encoding)) + if field.flags & FLAG.BINARY: + return data + + if connection.use_unicode: + data = data.decode(encoding) + elif connection.charset != field_charset: + data = data.decode(encoding) + data = data.encode(connection.encoding) + return data + +encoders = { + bool: escape_bool, + int: escape_int, + long_type: escape_int, + float: escape_float, + str: escape_str, + text_type: escape_unicode, + tuple: escape_sequence, + list: escape_sequence, + set: escape_sequence, + frozenset: escape_sequence, + dict: escape_dict, + type(None): escape_None, + datetime.date: escape_date, + datetime.datetime: escape_datetime, + datetime.timedelta: escape_timedelta, + datetime.time: escape_time, + time.struct_time: escape_struct_time, + Decimal: escape_object, +} + +if not PY2 or JYTHON or IRONPYTHON: + encoders[bytes] = escape_bytes + +decoders = { + FIELD_TYPE.BIT: convert_bit, + FIELD_TYPE.TINY: int, + FIELD_TYPE.SHORT: int, + FIELD_TYPE.LONG: int, + FIELD_TYPE.FLOAT: float, + FIELD_TYPE.DOUBLE: float, + FIELD_TYPE.LONGLONG: int, + FIELD_TYPE.INT24: int, + FIELD_TYPE.YEAR: int, + FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp, + FIELD_TYPE.DATETIME: convert_datetime, + FIELD_TYPE.TIME: convert_timedelta, + FIELD_TYPE.DATE: convert_date, + FIELD_TYPE.SET: convert_set, + FIELD_TYPE.BLOB: through, + FIELD_TYPE.TINY_BLOB: through, + FIELD_TYPE.MEDIUM_BLOB: through, + FIELD_TYPE.LONG_BLOB: through, + FIELD_TYPE.STRING: through, + FIELD_TYPE.VAR_STRING: through, + FIELD_TYPE.VARCHAR: through, + FIELD_TYPE.DECIMAL: Decimal, + FIELD_TYPE.NEWDECIMAL: Decimal, +} + + +# for MySQLdb compatibility +conversions = encoders.copy() +conversions.update(decoders) +Thing2Literal = escape_str diff --git a/server/www/packages/packages-windows/x86/pymysql/cursors.py b/server/www/packages/packages-windows/x86/pymysql/cursors.py new file mode 100644 index 0000000..cc16998 --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/cursors.py @@ 
-0,0 +1,536 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import +from functools import partial +import re +import warnings + +from ._compat import range_type, text_type, PY2 +from . import err + + +#: Regular expression for :meth:`Cursor.executemany`. +#: executemany only supports simple bulk insert. +#: You can use it to load large datasets. +RE_INSERT_VALUES = re.compile( + r"\s*((?:INSERT|REPLACE)\b.+\bVALUES?\s*)" + + r"(\(\s*(?:%s|%\(.+\)s)\s*(?:,\s*(?:%s|%\(.+\)s)\s*)*\))" + + r"(\s*(?:ON DUPLICATE.*)?);?\s*\Z", + re.IGNORECASE | re.DOTALL) + + +class Cursor(object): + """ + This is the object you use to interact with the database. + + Do not create an instance of a Cursor yourself. Call + connections.Connection.cursor(). + + See `Cursor `_ in + the specification. + """ + + #: Max statement size which :meth:`executemany` generates. + #: + #: Max size of allowed statement is max_allowed_packet - packet_header_size. + #: Default value of max_allowed_packet is 1048576. + max_stmt_length = 1024000 + + _defer_warnings = False + + def __init__(self, connection): + self.connection = connection + self.description = None + self.rownumber = 0 + self.rowcount = -1 + self.arraysize = 1 + self._executed = None + self._result = None + self._rows = None + self._warnings_handled = False + + def close(self): + """ + Closing a cursor just exhausts all remaining data. + """ + conn = self.connection + if conn is None: + return + try: + while self.nextset(): + pass + finally: + self.connection = None + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + del exc_info + self.close() + + def _get_db(self): + if not self.connection: + raise err.ProgrammingError("Cursor closed") + return self.connection + + def _check_executed(self): + if not self._executed: + raise err.ProgrammingError("execute() first") + + def _conv_row(self, row): + return row + + def setinputsizes(self, *args): + """Does nothing, required by DB API.""" + + def setoutputsizes(self, *args): + """Does nothing, required by DB API.""" + + def _nextset(self, unbuffered=False): + """Get the next query set""" + conn = self._get_db() + current_result = self._result + # for unbuffered queries warnings are only available once the whole result has been read + if unbuffered: + self._show_warnings() + if current_result is None or current_result is not conn._result: + return None + if not current_result.has_next: + return None + self._result = None + self._clear_result() + conn.next_result(unbuffered=unbuffered) + self._do_get_result() + return True + + def nextset(self): + return self._nextset(False) + + def _ensure_bytes(self, x, encoding=None): + if isinstance(x, text_type): + x = x.encode(encoding) + elif isinstance(x, (tuple, list)): + x = type(x)(self._ensure_bytes(v, encoding=encoding) for v in x) + return x + + def _escape_args(self, args, conn): + ensure_bytes = partial(self._ensure_bytes, encoding=conn.encoding) + + if isinstance(args, (tuple, list)): + if PY2: + args = tuple(map(ensure_bytes, args)) + return tuple(conn.literal(arg) for arg in args) + elif isinstance(args, dict): + if PY2: + args = dict((ensure_bytes(key), ensure_bytes(val)) for + (key, val) in args.items()) + return dict((key, conn.literal(val)) for (key, val) in args.items()) + else: + # If it's not a dictionary let's try escaping it anyway.
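mogrify (below) returns the exact statement after client-side %s interpolation, which is handy for seeing what execute() will send. A hedged sketch, assuming an open connection conn with default settings on Python 3:

    cur = conn.cursor()
    sql = cur.mogrify("SELECT * FROM t WHERE a = %s AND b = %s", (1, "x'y"))
    # With default settings this yields:
    #   SELECT * FROM t WHERE a = 1 AND b = 'x\'y'
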
+            # Worst case it will throw a ValueError
+            if PY2:
+                args = ensure_bytes(args)
+            return conn.escape(args)
+
+    def mogrify(self, query, args=None):
+        """
+        Returns the exact string that is sent to the database by calling the
+        execute() method.
+
+        This method follows the extension to the DB API 2.0 followed by Psycopg.
+        """
+        conn = self._get_db()
+        if PY2:  # Use bytes on Python 2 always
+            query = self._ensure_bytes(query, encoding=conn.encoding)
+
+        if args is not None:
+            query = query % self._escape_args(args, conn)
+
+        return query
+
+    def execute(self, query, args=None):
+        """Execute a query
+
+        :param str query: Query to execute.
+
+        :param args: parameters used with query. (optional)
+        :type args: tuple, list or dict
+
+        :return: Number of affected rows
+        :rtype: int
+
+        If args is a list or tuple, %s can be used as a placeholder in the query.
+        If args is a dict, %(name)s can be used as a placeholder in the query.
+        """
+        while self.nextset():
+            pass
+
+        query = self.mogrify(query, args)
+
+        result = self._query(query)
+        self._executed = query
+        return result
+
+    def executemany(self, query, args):
+        # type: (str, list) -> int
+        """Run the given query against several parameter sequences or mappings.
+
+        :param query: query to execute on server
+        :param args: Sequence of sequences or mappings. It is used as parameter.
+        :return: Number of rows affected, if any.
+
+        This method improves performance on multiple-row INSERT and
+        REPLACE. Otherwise it is equivalent to looping over args with
+        execute().
+        """
+        if not args:
+            return
+
+        m = RE_INSERT_VALUES.match(query)
+        if m:
+            q_prefix = m.group(1) % ()
+            q_values = m.group(2).rstrip()
+            q_postfix = m.group(3) or ''
+            assert q_values[0] == '(' and q_values[-1] == ')'
+            return self._do_execute_many(q_prefix, q_values, q_postfix, args,
+                                         self.max_stmt_length,
+                                         self._get_db().encoding)
+
+        self.rowcount = sum(self.execute(query, arg) for arg in args)
+        return self.rowcount
+
+    def _do_execute_many(self, prefix, values, postfix, args, max_stmt_length, encoding):
+        conn = self._get_db()
+        escape = self._escape_args
+        if isinstance(prefix, text_type):
+            prefix = prefix.encode(encoding)
+        if PY2 and isinstance(values, text_type):
+            values = values.encode(encoding)
+        if isinstance(postfix, text_type):
+            postfix = postfix.encode(encoding)
+        sql = bytearray(prefix)
+        args = iter(args)
+        v = values % escape(next(args), conn)
+        if isinstance(v, text_type):
+            if PY2:
+                v = v.encode(encoding)
+            else:
+                v = v.encode(encoding, 'surrogateescape')
+        sql += v
+        rows = 0
+        for arg in args:
+            v = values % escape(arg, conn)
+            if isinstance(v, text_type):
+                if PY2:
+                    v = v.encode(encoding)
+                else:
+                    v = v.encode(encoding, 'surrogateescape')
+            if len(sql) + len(v) + len(postfix) + 1 > max_stmt_length:
+                rows += self.execute(sql + postfix)
+                sql = bytearray(prefix)
+            else:
+                sql += b','
+            sql += v
+        rows += self.execute(sql + postfix)
+        self.rowcount = rows
+        return rows
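To see what _do_execute_many receives, it helps to run RE_INSERT_VALUES by hand; a small sketch (the users table and columns are made up for illustration):

    m = RE_INSERT_VALUES.match(
        "INSERT INTO users (name, age) VALUES (%s, %s)"
        " ON DUPLICATE KEY UPDATE age = VALUES(age)")
    print(m.group(1))  # 'INSERT INTO users (name, age) VALUES '
    print(m.group(2))  # '(%s, %s)'
    print(m.group(3))  # ' ON DUPLICATE KEY UPDATE age = VALUES(age)'

Each parameter row is rendered through group(2) and joined with commas after the single group(1) prefix; the buffer is flushed to execute() whenever adding another row would cross max_stmt_length.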
+    def callproc(self, procname, args=()):
+        """Execute stored procedure procname with args
+
+        procname -- string, name of procedure to execute on server
+
+        args -- Sequence of parameters to use with procedure
+
+        Returns the original args.
+
+        Compatibility warning: PEP-249 specifies that any modified
+        parameters must be returned. This is currently impossible
+        as they are only available by storing them in a server
+        variable and then retrieved by a query. Since stored
+        procedures return zero or more result sets, there is no
+        reliable way to get at OUT or INOUT parameters via callproc.
+        The server variables are named @_procname_n, where procname
+        is the parameter above and n is the position of the parameter
+        (from zero). Once all result sets generated by the procedure
+        have been fetched, you can issue a SELECT @_procname_0, ...
+        query using .execute() to get any OUT or INOUT values.
+
+        Compatibility warning: The act of calling a stored procedure
+        itself creates an empty result set. This appears after any
+        result sets generated by the procedure. This is non-standard
+        behavior with respect to the DB-API. Be sure to use nextset()
+        to advance through all result sets; otherwise you may get
+        disconnected.
+        """
+        conn = self._get_db()
+        if args:
+            fmt = '@_{0}_%d=%s'.format(procname)
+            self._query('SET %s' % ','.join(fmt % (index, conn.escape(arg))
+                                            for index, arg in enumerate(args)))
+            self.nextset()
+
+        q = "CALL %s(%s)" % (procname,
+                             ','.join(['@_%s_%d' % (procname, i)
+                                       for i in range_type(len(args))]))
+        self._query(q)
+        self._executed = q
+        return args
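A short, hedged sketch of the OUT-parameter dance the docstring describes (the procedure name, its arguments, and the open connection are hypothetical):

    cur = conn.cursor()
    cur.callproc('my_proc', ('input', 0))   # issues CALL my_proc(@_my_proc_0, @_my_proc_1)
    while cur.nextset():                    # drain the procedure's result sets
        pass
    cur.execute('SELECT @_my_proc_0, @_my_proc_1')
    print(cur.fetchone())                   # any OUT/INOUT values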
+    def fetchone(self):
+        """Fetch the next row"""
+        self._check_executed()
+        if self._rows is None or self.rownumber >= len(self._rows):
+            return None
+        result = self._rows[self.rownumber]
+        self.rownumber += 1
+        return result
+
+    def fetchmany(self, size=None):
+        """Fetch several rows"""
+        self._check_executed()
+        if self._rows is None:
+            return ()
+        end = self.rownumber + (size or self.arraysize)
+        result = self._rows[self.rownumber:end]
+        self.rownumber = min(end, len(self._rows))
+        return result
+
+    def fetchall(self):
+        """Fetch all the rows"""
+        self._check_executed()
+        if self._rows is None:
+            return ()
+        if self.rownumber:
+            result = self._rows[self.rownumber:]
+        else:
+            result = self._rows
+        self.rownumber = len(self._rows)
+        return result
+
+    def scroll(self, value, mode='relative'):
+        self._check_executed()
+        if mode == 'relative':
+            r = self.rownumber + value
+        elif mode == 'absolute':
+            r = value
+        else:
+            raise err.ProgrammingError("unknown scroll mode %s" % mode)
+
+        if not (0 <= r < len(self._rows)):
+            raise IndexError("out of range")
+        self.rownumber = r
+
+    def _query(self, q):
+        conn = self._get_db()
+        self._last_executed = q
+        self._clear_result()
+        conn.query(q)
+        self._do_get_result()
+        return self.rowcount
+
+    def _clear_result(self):
+        self.rownumber = 0
+        self._result = None
+
+        self.rowcount = 0
+        self.description = None
+        self.lastrowid = None
+        self._rows = None
+
+    def _do_get_result(self):
+        conn = self._get_db()
+
+        self._result = result = conn._result
+
+        self.rowcount = result.affected_rows
+        self.description = result.description
+        self.lastrowid = result.insert_id
+        self._rows = result.rows
+        self._warnings_handled = False
+
+        if not self._defer_warnings:
+            self._show_warnings()
+
+    def _show_warnings(self):
+        if self._warnings_handled:
+            return
+        self._warnings_handled = True
+        if self._result and (self._result.has_next or not self._result.warning_count):
+            return
+        ws = self._get_db().show_warnings()
+        if ws is None:
+            return
+        for w in ws:
+            msg = w[-1]
+            if PY2:
+                if isinstance(msg, unicode):
+                    msg = msg.encode('utf-8', 'replace')
+            warnings.warn(err.Warning(*w[1:3]), stacklevel=4)
+
+    def __iter__(self):
+        return iter(self.fetchone, None)
+
+    Warning = err.Warning
+    Error = err.Error
+    InterfaceError = err.InterfaceError
+    DatabaseError = err.DatabaseError
+    DataError = err.DataError
+    OperationalError = err.OperationalError
+    IntegrityError = err.IntegrityError
+    InternalError = err.InternalError
+    ProgrammingError = err.ProgrammingError
+    NotSupportedError = err.NotSupportedError
+
+
+class DictCursorMixin(object):
+    # You can override this to use OrderedDict or other dict-like types.
+    dict_type = dict
+
+    def _do_get_result(self):
+        super(DictCursorMixin, self)._do_get_result()
+        fields = []
+        if self.description:
+            for f in self._result.fields:
+                name = f.name
+                if name in fields:
+                    name = f.table_name + '.' + name
+                fields.append(name)
+            self._fields = fields
+
+        if fields and self._rows:
+            self._rows = [self._conv_row(r) for r in self._rows]
+
+    def _conv_row(self, row):
+        if row is None:
+            return None
+        return self.dict_type(zip(self._fields, row))
+
+
+class DictCursor(DictCursorMixin, Cursor):
+    """A cursor which returns results as a dictionary"""
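Usage sketch for the dict-returning cursor (the connection and table are assumed, not part of this diff):

    cur = conn.cursor(DictCursor)
    cur.execute("SELECT id, name FROM users")
    print(cur.fetchone())   # e.g. {'id': 1, 'name': 'alice'}

+class SSCursor(Cursor):
+    """
+    Unbuffered Cursor, mainly useful for queries that return a lot of data,
+    or for connections to remote servers over a slow network.
+
+    Instead of copying every row of data into a buffer, this will fetch
+    rows as needed. The upside of this is the client uses much less memory,
+    and rows are returned much faster when traveling over a slow network
+    or if the result set is very big.
+
+    There are limitations, though. The MySQL protocol doesn't support
+    returning the total number of rows, so the only way to tell how many rows
+    there are is to iterate over every row returned. Also, it currently isn't
+    possible to scroll backwards, as only the current row is held in memory.
+    """
+
+    _defer_warnings = True
+
+    def _conv_row(self, row):
+        return row
+
+    def close(self):
+        conn = self.connection
+        if conn is None:
+            return
+
+        if self._result is not None and self._result is conn._result:
+            self._result._finish_unbuffered_query()
+
+        try:
+            while self.nextset():
+                pass
+        finally:
+            self.connection = None
+
+    __del__ = close
+
+    def _query(self, q):
+        conn = self._get_db()
+        self._last_executed = q
+        self._clear_result()
+        conn.query(q, unbuffered=True)
+        self._do_get_result()
+        return self.rowcount
+
+    def nextset(self):
+        return self._nextset(unbuffered=True)
+
+    def read_next(self):
+        """Read next row"""
+        return self._conv_row(self._result._read_rowdata_packet_unbuffered())
+
+    def fetchone(self):
+        """Fetch next row"""
+        self._check_executed()
+        row = self.read_next()
+        if row is None:
+            self._show_warnings()
+            return None
+        self.rownumber += 1
+        return row
+
+    def fetchall(self):
+        """
+        Fetch all, as per MySQLdb. Pretty useless for large queries, as
+        it is buffered. See fetchall_unbuffered(), if you want an unbuffered
+        generator version of this method.
+        """
+        return list(self.fetchall_unbuffered())
+
+    def fetchall_unbuffered(self):
+        """
+        Fetch all, implemented as a generator, which isn't standard;
+        however, it doesn't make sense to return everything in a list, as that
+        would use ridiculous memory for large result sets.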
+ """ + return iter(self.fetchone, None) + + def __iter__(self): + return self.fetchall_unbuffered() + + def fetchmany(self, size=None): + """Fetch many""" + self._check_executed() + if size is None: + size = self.arraysize + + rows = [] + for i in range_type(size): + row = self.read_next() + if row is None: + self._show_warnings() + break + rows.append(row) + self.rownumber += 1 + return rows + + def scroll(self, value, mode='relative'): + self._check_executed() + + if mode == 'relative': + if value < 0: + raise err.NotSupportedError( + "Backwards scrolling not supported by this cursor") + + for _ in range_type(value): + self.read_next() + self.rownumber += value + elif mode == 'absolute': + if value < self.rownumber: + raise err.NotSupportedError( + "Backwards scrolling not supported by this cursor") + + end = value - self.rownumber + for _ in range_type(end): + self.read_next() + self.rownumber = value + else: + raise err.ProgrammingError("unknown scroll mode %s" % mode) + + +class SSDictCursor(DictCursorMixin, SSCursor): + """An unbuffered cursor, which returns results as a dictionary""" diff --git a/server/www/packages/packages-windows/x86/pymysql/err.py b/server/www/packages/packages-windows/x86/pymysql/err.py new file mode 100644 index 0000000..fbc6055 --- /dev/null +++ b/server/www/packages/packages-windows/x86/pymysql/err.py @@ -0,0 +1,109 @@ +import struct + +from .constants import ER + + +class MySQLError(Exception): + """Exception related to operation with MySQL.""" + + +class Warning(Warning, MySQLError): + """Exception raised for important warnings like data truncations + while inserting, etc.""" + + +class Error(MySQLError): + """Exception that is the base class of all other error exceptions + (not Warning).""" + + +class InterfaceError(Error): + """Exception raised for errors that are related to the database + interface rather than the database itself.""" + + +class DatabaseError(Error): + """Exception raised for errors that are related to the + database.""" + + +class DataError(DatabaseError): + """Exception raised for errors that are due to problems with the + processed data like division by zero, numeric value out of range, + etc.""" + + +class OperationalError(DatabaseError): + """Exception raised for errors that are related to the database's + operation and not necessarily under the control of the programmer, + e.g. an unexpected disconnect occurs, the data source name is not + found, a transaction could not be processed, a memory allocation + error occurred during processing, etc.""" + + +class IntegrityError(DatabaseError): + """Exception raised when the relational integrity of the database + is affected, e.g. a foreign key check fails, duplicate key, + etc.""" + + +class InternalError(DatabaseError): + """Exception raised when the database encounters an internal + error, e.g. the cursor is not valid anymore, the transaction is + out of sync, etc.""" + + +class ProgrammingError(DatabaseError): + """Exception raised for programming errors, e.g. table not found + or already exists, syntax error in the SQL statement, wrong number + of parameters specified, etc.""" + + +class NotSupportedError(DatabaseError): + """Exception raised in case a method or database API was used + which is not supported by the database, e.g. 
requesting a
+    .rollback() on a connection that does not support transaction or
+    has transactions turned off."""
+
+
+error_map = {}
+
+
+def _map_error(exc, *errors):
+    for error in errors:
+        error_map[error] = exc
+
+
+_map_error(ProgrammingError, ER.DB_CREATE_EXISTS, ER.SYNTAX_ERROR,
+           ER.PARSE_ERROR, ER.NO_SUCH_TABLE, ER.WRONG_DB_NAME,
+           ER.WRONG_TABLE_NAME, ER.FIELD_SPECIFIED_TWICE,
+           ER.INVALID_GROUP_FUNC_USE, ER.UNSUPPORTED_EXTENSION,
+           ER.TABLE_MUST_HAVE_COLUMNS, ER.CANT_DO_THIS_DURING_AN_TRANSACTION,
+           ER.WRONG_DB_NAME, ER.WRONG_COLUMN_NAME,
+           )
+_map_error(DataError, ER.WARN_DATA_TRUNCATED, ER.WARN_NULL_TO_NOTNULL,
+           ER.WARN_DATA_OUT_OF_RANGE, ER.NO_DEFAULT, ER.PRIMARY_CANT_HAVE_NULL,
+           ER.DATA_TOO_LONG, ER.DATETIME_FUNCTION_OVERFLOW)
+_map_error(IntegrityError, ER.DUP_ENTRY, ER.NO_REFERENCED_ROW,
+           ER.NO_REFERENCED_ROW_2, ER.ROW_IS_REFERENCED, ER.ROW_IS_REFERENCED_2,
+           ER.CANNOT_ADD_FOREIGN, ER.BAD_NULL_ERROR)
+_map_error(NotSupportedError, ER.WARNING_NOT_COMPLETE_ROLLBACK,
+           ER.NOT_SUPPORTED_YET, ER.FEATURE_DISABLED, ER.UNKNOWN_STORAGE_ENGINE)
+_map_error(OperationalError, ER.DBACCESS_DENIED_ERROR, ER.ACCESS_DENIED_ERROR,
+           ER.CON_COUNT_ERROR, ER.TABLEACCESS_DENIED_ERROR,
+           ER.COLUMNACCESS_DENIED_ERROR, ER.CONSTRAINT_FAILED, ER.LOCK_DEADLOCK)
+
+
+del _map_error, ER
+
+
+def raise_mysql_exception(data):
+    errno = struct.unpack('<h', data[1:3])[0]
+    is_41 = data[3:4] == b"#"
+    if is_41:
+        # client protocol 4.1
+        errval = data[9:].decode('utf-8', 'replace')
+    else:
+        errval = data[3:].decode('utf-8', 'replace')
+    errorclass = error_map.get(errno, InternalError)
+    raise errorclass(errno, errval)
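With the map populated, a fabricated ERROR payload shows the whole path end to end (errno 1146 is ER.NO_SUCH_TABLE; the message bytes are invented, and raise_mysql_exception above is partly reconstructed from upstream pymysql):

    payload = (b'\xff' + struct.pack('<h', 1146) +
               b'#42S02' + b"Table 'test.t' doesn't exist")
    try:
        raise_mysql_exception(payload)
    except ProgrammingError as exc:
        print(exc.args)   # (1146, "Table 'test.t' doesn't exist")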
diff --git a/server/www/packages/packages-windows/x86/pymysql/optionfile.py b/server/www/packages/packages-windows/x86/pymysql/optionfile.py
new file mode 100644
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/pymysql/optionfile.py
@@ -0,0 +1,26 @@
+import io
+
+from ._compat import PY2
+
+if PY2:
+    import ConfigParser as configparser
+else:
+    import configparser
+
+
+class Parser(configparser.RawConfigParser):
+
+    def __init__(self, **kwargs):
+        kwargs['allow_no_value'] = True
+        configparser.RawConfigParser.__init__(self, **kwargs)
+
+    def __remove_quotes(self, value):
+        quotes = ["'", "\""]
+        for quote in quotes:
+            if len(value) >= 2 and value[0] == value[-1] == quote:
+                return value[1:-1]
+        return value
+
+    def get(self, section, option):
+        value = configparser.RawConfigParser.get(self, section, option)
+        return self.__remove_quotes(value)
diff --git a/server/www/packages/packages-windows/x86/pymysql/protocol.py b/server/www/packages/packages-windows/x86/pymysql/protocol.py
new file mode 100644
index 0000000..8ccf7c4
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/pymysql/protocol.py
@@ -0,0 +1,341 @@
+# Python implementation of low level MySQL client-server protocol
+# http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+from __future__ import print_function
+from .charset import MBLENGTH
+from ._compat import PY2, range_type
+from .constants import FIELD_TYPE, SERVER_STATUS
+from . import err
+from .util import byte2int
+
+import struct
+import sys
+
+
+DEBUG = False
+
+NULL_COLUMN = 251
+UNSIGNED_CHAR_COLUMN = 251
+UNSIGNED_SHORT_COLUMN = 252
+UNSIGNED_INT24_COLUMN = 253
+UNSIGNED_INT64_COLUMN = 254
+
+
+def dump_packet(data):  # pragma: no cover
+    def printable(data):
+        if 32 <= byte2int(data) < 127:
+            if isinstance(data, int):
+                return chr(data)
+            return data
+        return '.'
+
+    try:
+        print("packet length:", len(data))
+        for i in range(1, 7):
+            f = sys._getframe(i)
+            print("call[%d]: %s (line %d)" % (i, f.f_code.co_name, f.f_lineno))
+        print("-" * 66)
+    except ValueError:
+        pass
+    dump_data = [data[i:i+16] for i in range_type(0, min(len(data), 256), 16)]
+    for d in dump_data:
+        print(' '.join("{:02X}".format(byte2int(x)) for x in d) +
+              '   ' * (16 - len(d)) + ' ' * 2 +
+              ''.join(printable(x) for x in d))
+    print("-" * 66)
+    print()
+
+
+class MysqlPacket(object):
+    """Representation of a MySQL response packet.
+
+    Provides an interface for reading/parsing the packet results.
+    """
+    __slots__ = ('_position', '_data')
+
+    def __init__(self, data, encoding):
+        self._position = 0
+        self._data = data
+
+    def get_all_data(self):
+        return self._data
+
+    def read(self, size):
+        """Read the first 'size' bytes in packet and advance cursor past them."""
+        result = self._data[self._position:(self._position+size)]
+        if len(result) != size:
+            error = ('Result length not requested length:\n'
+                     'Expected=%s.  Actual=%s.  Position: %s.  Data Length: %s'
+                     % (size, len(result), self._position, len(self._data)))
+            if DEBUG:
+                print(error)
+                self.dump()
+            raise AssertionError(error)
+        self._position += size
+        return result
+
+    def read_all(self):
+        """Read all remaining data in the packet.
+
+        (Subsequent read() will return errors.)
+        """
+        result = self._data[self._position:]
+        self._position = None  # ensure no subsequent read()
+        return result
+
+    def advance(self, length):
+        """Advance the cursor in data buffer 'length' bytes."""
+        new_position = self._position + length
+        if new_position < 0 or new_position > len(self._data):
+            raise Exception('Invalid advance amount (%s) for cursor.  '
+                            'Position=%s' % (length, new_position))
+        self._position = new_position
+
+    def rewind(self, position=0):
+        """Set the position of the data buffer cursor to 'position'."""
+        if position < 0 or position > len(self._data):
+            raise Exception("Invalid position to rewind cursor to: %s." % position)
+        self._position = position
+
+    def get_bytes(self, position, length=1):
+        """Get 'length' bytes starting at 'position'.
+
+        Position is start of payload (first four packet header bytes are not
+        included) starting at index '0'.
+
+        No error checking is done.  If requesting outside end of buffer
+        an empty string (or string shorter than 'length') may be returned!
+        """
+        return self._data[position:(position+length)]
+
+    if PY2:
+        def read_uint8(self):
+            result = ord(self._data[self._position])
+            self._position += 1
+            return result
+    else:
+        def read_uint8(self):
+            result = self._data[self._position]
+            self._position += 1
+            return result
+
+    def read_uint16(self):
+        result = struct.unpack_from('<H', self._data, self._position)[0]
+        self._position += 2
+        return result
+
+    def read_uint24(self):
+        low, high = struct.unpack_from('<HB', self._data, self._position)
+        self._position += 3
+        return low + (high << 16)
+
+    def read_uint32(self):
+        result = struct.unpack_from('<I', self._data, self._position)[0]
+        self._position += 4
+        return result
+
+    def read_uint64(self):
+        result = struct.unpack_from('<Q', self._data, self._position)[0]
+        self._position += 8
+        return result
+
+    def read_string(self):
+        end_pos = self._data.find(b'\0', self._position)
+        if end_pos < 0:
+            return None
+        result = self._data[self._position:end_pos]
+        self._position = end_pos + 1
+        return result
+
+    def read_length_encoded_integer(self):
+        """Read a 'Length Coded Binary' number from the data buffer.
+
+        Length coded numbers can be anywhere from 1 to 9 bytes depending
+        on the value of the first byte.
+        """
+        c = self.read_uint8()
+        if c == NULL_COLUMN:
+            return None
+        if c < UNSIGNED_CHAR_COLUMN:
+            return c
+        elif c == UNSIGNED_SHORT_COLUMN:
+            return self.read_uint16()
+        elif c == UNSIGNED_INT24_COLUMN:
+            return self.read_uint24()
+        elif c == UNSIGNED_INT64_COLUMN:
+            return self.read_uint64()
+
+    def read_length_coded_string(self):
+        """Read a 'Length Coded String' from the data buffer.
+
+        A 'Length Coded String' consists first of a length coded
+        (unsigned, positive) integer represented in 1-9 bytes followed by
+        that many bytes of binary data.  (For example "cat" would be "3cat".)
+        """
+        length = self.read_length_encoded_integer()
+        if length is None:
+            return None
+        return self.read(length)
+
+    def read_struct(self, fmt):
+        s = struct.Struct(fmt)
+        result = s.unpack_from(self._data, self._position)
+        self._position += s.size
+        return result
+
+    def is_ok_packet(self):
+        # https://dev.mysql.com/doc/internals/en/packet-OK_Packet.html
+        return self._data[0:1] == b'\x00' and len(self._data) >= 7
+
+    def is_eof_packet(self):
+        # http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-EOF_Packet
+        # Caution: \xFE may be LengthEncodedInteger.
+        # If \xFE is LengthEncodedInteger header, 8bytes followed.
+        return self._data[0:1] == b'\xfe' and len(self._data) < 9
+
+    def is_auth_switch_request(self):
+        # http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest
+        return self._data[0:1] == b'\xfe'
+
+    def is_extra_auth_data(self):
+        # https://dev.mysql.com/doc/internals/en/successful-authentication.html
+        return self._data[0:1] == b'\x01'
+
+    def is_resultset_packet(self):
+        field_count = ord(self._data[0:1])
+        return 1 <= field_count <= 250
+
+    def is_load_local_packet(self):
+        return self._data[0:1] == b'\xfb'
+
+    def is_error_packet(self):
+        return self._data[0:1] == b'\xff'
+
+    def check_error(self):
+        if self.is_error_packet():
+            self.rewind()
+            self.advance(1)  # field_count == error (we already know that)
+            errno = self.read_uint16()
+            if DEBUG: print("errno =", errno)
+            err.raise_mysql_exception(self._data)
+
+    def dump(self):
+        dump_packet(self._data)
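A tiny sketch of the length-encoded integer rules above, over a fabricated three-byte buffer (the 0xfc prefix announces a 2-byte little-endian integer; the read_uintXX helpers here are partly reconstructed from upstream pymysql):

    pkt = MysqlPacket(b'\xfc\x34\x12', 'utf-8')
    print(pkt.read_length_encoded_integer())   # 0x1234 == 4660

+class FieldDescriptorPacket(MysqlPacket):
+    """A MysqlPacket that represents a specific column's metadata in the result.
+
+    Parsing is automatically done and the results are exported via public
+    attributes on the class such as: db, table_name, name, length, type_code.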
+ """ + + def __init__(self, data, encoding): + MysqlPacket.__init__(self, data, encoding) + self._parse_field_descriptor(encoding) + + def _parse_field_descriptor(self, encoding): + """Parse the 'Field Descriptor' (Metadata) packet. + + This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0). + """ + self.catalog = self.read_length_coded_string() + self.db = self.read_length_coded_string() + self.table_name = self.read_length_coded_string().decode(encoding) + self.org_table = self.read_length_coded_string().decode(encoding) + self.name = self.read_length_coded_string().decode(encoding) + self.org_name = self.read_length_coded_string().decode(encoding) + self.charsetnr, self.length, self.type_code, self.flags, self.scale = ( + self.read_struct(' element (removes white spaces + between individual QR points). + """ + + QR_PATH_STYLE = 'fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none' + + def __init__(self, *args, **kwargs): + self._points = set() + super(SvgPathImage, self).__init__(*args, **kwargs) + + def _svg(self, viewBox=None, **kwargs): + if viewBox is None: + dimension = self.units(self.pixel_size, text=False) + viewBox = '0 0 %(d)s %(d)s' % {'d': dimension} + return super(SvgPathImage, self)._svg(viewBox=viewBox, **kwargs) + + def drawrect(self, row, col): + # (x, y) + self._points.add((col, row)) + + def _generate_subpaths(self): + """Generates individual QR points as subpaths""" + + rect_size = self.units(self.box_size, text=False) + + for point in self._points: + x_base = self.units( + (point[0]+self.border)*self.box_size, text=False) + y_base = self.units( + (point[1]+self.border)*self.box_size, text=False) + + yield ( + 'M %(x0)s %(y0)s L %(x0)s %(y1)s L %(x1)s %(y1)s L %(x1)s ' + '%(y0)s z' % dict( + x0=x_base, y0=y_base, + x1=x_base+rect_size, y1=y_base+rect_size, + )) + + def make_path(self): + subpaths = self._generate_subpaths() + + return ET.Element( + ET.QName("path"), + style=self.QR_PATH_STYLE, + d=' '.join(subpaths), + id="qr-path" + ) + + def _write(self, stream): + self._img.append(self.make_path()) + super(SvgPathImage, self)._write(stream) + + +class SvgFillImage(SvgImage): + """ + An SvgImage that fills the background to white. + """ + background = 'white' + + +class SvgPathFillImage(SvgPathImage): + """ + An SvgPathImage that fills the background to white. 
+    """
+    background = 'white'
diff --git a/server/www/packages/packages-windows/x86/qrcode/main.py b/server/www/packages/packages-windows/x86/qrcode/main.py
new file mode 100644
index 0000000..1e164f1
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/qrcode/main.py
@@ -0,0 +1,439 @@
+from qrcode import constants, exceptions, util
+from qrcode.image.base import BaseImage
+
+import six
+from bisect import bisect_left
+
+
+def make(data=None, **kwargs):
+    qr = QRCode(**kwargs)
+    qr.add_data(data)
+    return qr.make_image()
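Typical use goes through this one helper via the package namespace; a minimal sketch (the payload and output filename are illustrative):

    import qrcode

    img = qrcode.make('https://example.com')   # PIL-backed image by default
    img.save('example.png')

+def _check_version(version):
+    if version < 1 or version > 40:
+        raise ValueError(
+            "Invalid version (was %s, expected 1 to 40)" % version)
+
+
+def _check_box_size(size):
+    if int(size) <= 0:
+        raise ValueError(
+            "Invalid box size (was %s, expected larger than 0)" % size)
+
+
+def _check_mask_pattern(mask_pattern):
+    if mask_pattern is None:
+        return
+    if not isinstance(mask_pattern, int):
+        raise TypeError(
+            "Invalid mask pattern (was %s, expected int)" % type(mask_pattern))
+    if mask_pattern < 0 or mask_pattern > 7:
+        raise ValueError(
+            "Mask pattern should be in range(8) (got %s)" % mask_pattern)
+
+
+class QRCode:
+
+    def __init__(self, version=None,
+                 error_correction=constants.ERROR_CORRECT_M,
+                 box_size=10, border=4,
+                 image_factory=None,
+                 mask_pattern=None):
+        _check_box_size(box_size)
+        self.version = version and int(version)
+        self.error_correction = int(error_correction)
+        self.box_size = int(box_size)
+        # Spec says border should be at least four boxes wide, but allow for
+        # any (e.g. for producing printable QR codes).
+        self.border = int(border)
+        _check_mask_pattern(mask_pattern)
+        self.mask_pattern = mask_pattern
+        self.image_factory = image_factory
+        if image_factory is not None:
+            assert issubclass(image_factory, BaseImage)
+        self.clear()
+
+    def clear(self):
+        """
+        Reset the internal data.
+        """
+        self.modules = None
+        self.modules_count = 0
+        self.data_cache = None
+        self.data_list = []
+
+    def add_data(self, data, optimize=20):
+        """
+        Add data to this QR Code.
+
+        :param optimize: Data will be split into multiple chunks to optimize
+            the QR size by finding more compressed modes of at least this
+            length. Set to ``0`` to avoid optimizing at all.
+        """
+        if isinstance(data, util.QRData):
+            self.data_list.append(data)
+        else:
+            if optimize:
+                self.data_list.extend(
+                    util.optimal_data_chunks(data, minimum=optimize))
+            else:
+                self.data_list.append(util.QRData(data))
+        self.data_cache = None
+
+    def make(self, fit=True):
+        """
+        Compile the data into a QR Code array.
+
+        :param fit: If ``True`` (or if a size has not been provided), find the
+            best fit for the data to avoid data overflow errors.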
+ """ + if fit or (self.version is None): + self.best_fit(start=self.version) + if self.mask_pattern is None: + self.makeImpl(False, self.best_mask_pattern()) + else: + self.makeImpl(False, self.mask_pattern) + + def makeImpl(self, test, mask_pattern): + _check_version(self.version) + self.modules_count = self.version * 4 + 17 + self.modules = [None] * self.modules_count + + for row in range(self.modules_count): + + self.modules[row] = [None] * self.modules_count + + for col in range(self.modules_count): + self.modules[row][col] = None # (col + row) % 3 + + self.setup_position_probe_pattern(0, 0) + self.setup_position_probe_pattern(self.modules_count - 7, 0) + self.setup_position_probe_pattern(0, self.modules_count - 7) + self.setup_position_adjust_pattern() + self.setup_timing_pattern() + self.setup_type_info(test, mask_pattern) + + if self.version >= 7: + self.setup_type_number(test) + + if self.data_cache is None: + self.data_cache = util.create_data( + self.version, self.error_correction, self.data_list) + self.map_data(self.data_cache, mask_pattern) + + def setup_position_probe_pattern(self, row, col): + for r in range(-1, 8): + + if row + r <= -1 or self.modules_count <= row + r: + continue + + for c in range(-1, 8): + + if col + c <= -1 or self.modules_count <= col + c: + continue + + if (0 <= r and r <= 6 and (c == 0 or c == 6) + or (0 <= c and c <= 6 and (r == 0 or r == 6)) + or (2 <= r and r <= 4 and 2 <= c and c <= 4)): + self.modules[row + r][col + c] = True + else: + self.modules[row + r][col + c] = False + + def best_fit(self, start=None): + """ + Find the minimum size required to fit in the data. + """ + if start is None: + start = 1 + _check_version(start) + + # Corresponds to the code in util.create_data, except we don't yet know + # version, so optimistically assume start and check later + mode_sizes = util.mode_sizes_for_version(start) + buffer = util.BitBuffer() + for data in self.data_list: + buffer.put(data.mode, 4) + buffer.put(len(data), mode_sizes[data.mode]) + data.write(buffer) + + needed_bits = len(buffer) + self.version = bisect_left(util.BIT_LIMIT_TABLE[self.error_correction], + needed_bits, start) + if self.version == 41: + raise exceptions.DataOverflowError() + + # Now check whether we need more bits for the mode sizes, recursing if + # our guess was too low + if mode_sizes is not util.mode_sizes_for_version(self.version): + self.best_fit(start=self.version) + return self.version + + def best_mask_pattern(self): + """ + Find the most efficient mask pattern. + """ + min_lost_point = 0 + pattern = 0 + + for i in range(8): + self.makeImpl(True, i) + + lost_point = util.lost_point(self.modules) + + if i == 0 or min_lost_point > lost_point: + min_lost_point = lost_point + pattern = i + + return pattern + + def print_tty(self, out=None): + """ + Output the QR Code only using TTY colors. + + If the data has not been compiled yet, make it first. 
+ """ + if out is None: + import sys + out = sys.stdout + + if not out.isatty(): + raise OSError("Not a tty") + + if self.data_cache is None: + self.make() + + modcount = self.modules_count + out.write("\x1b[1;47m" + (" " * (modcount * 2 + 4)) + "\x1b[0m\n") + for r in range(modcount): + out.write("\x1b[1;47m \x1b[40m") + for c in range(modcount): + if self.modules[r][c]: + out.write(" ") + else: + out.write("\x1b[1;47m \x1b[40m") + out.write("\x1b[1;47m \x1b[0m\n") + out.write("\x1b[1;47m" + (" " * (modcount * 2 + 4)) + "\x1b[0m\n") + out.flush() + + def print_ascii(self, out=None, tty=False, invert=False): + """ + Output the QR Code using ASCII characters. + + :param tty: use fixed TTY color codes (forces invert=True) + :param invert: invert the ASCII characters (solid <-> transparent) + """ + if out is None: + import sys + if sys.version_info < (2, 7): + # On Python versions 2.6 and earlier, stdout tries to encode + # strings using ASCII rather than stdout.encoding, so use this + # workaround. + import codecs + out = codecs.getwriter(sys.stdout.encoding)(sys.stdout) + else: + out = sys.stdout + + if tty and not out.isatty(): + raise OSError("Not a tty") + + if self.data_cache is None: + self.make() + + modcount = self.modules_count + codes = [six.int2byte(code).decode('cp437') + for code in (255, 223, 220, 219)] + if tty: + invert = True + if invert: + codes.reverse() + + def get_module(x, y): + if (invert and self.border and + max(x, y) >= modcount+self.border): + return 1 + if min(x, y) < 0 or max(x, y) >= modcount: + return 0 + return self.modules[x][y] + + for r in range(-self.border, modcount+self.border, 2): + if tty: + if not invert or r < modcount+self.border-1: + out.write('\x1b[48;5;232m') # Background black + out.write('\x1b[38;5;255m') # Foreground white + for c in range(-self.border, modcount+self.border): + pos = get_module(r, c) + (get_module(r+1, c) << 1) + out.write(codes[pos]) + if tty: + out.write('\x1b[0m') + out.write('\n') + out.flush() + + def make_image(self, image_factory=None, **kwargs): + """ + Make an image from the QR Code data. + + If the data has not been compiled yet, make it first. 
+ """ + _check_box_size(self.box_size) + if self.data_cache is None: + self.make() + + if image_factory is not None: + assert issubclass(image_factory, BaseImage) + else: + image_factory = self.image_factory + if image_factory is None: + # Use PIL by default + from qrcode.image.pil import PilImage + image_factory = PilImage + + im = image_factory( + self.border, self.modules_count, self.box_size, **kwargs) + for r in range(self.modules_count): + for c in range(self.modules_count): + if self.modules[r][c]: + im.drawrect(r, c) + return im + + def setup_timing_pattern(self): + for r in range(8, self.modules_count - 8): + if self.modules[r][6] is not None: + continue + self.modules[r][6] = (r % 2 == 0) + + for c in range(8, self.modules_count - 8): + if self.modules[6][c] is not None: + continue + self.modules[6][c] = (c % 2 == 0) + + def setup_position_adjust_pattern(self): + pos = util.pattern_position(self.version) + + for i in range(len(pos)): + + for j in range(len(pos)): + + row = pos[i] + col = pos[j] + + if self.modules[row][col] is not None: + continue + + for r in range(-2, 3): + + for c in range(-2, 3): + + if (r == -2 or r == 2 or c == -2 or c == 2 or + (r == 0 and c == 0)): + self.modules[row + r][col + c] = True + else: + self.modules[row + r][col + c] = False + + def setup_type_number(self, test): + bits = util.BCH_type_number(self.version) + + for i in range(18): + mod = (not test and ((bits >> i) & 1) == 1) + self.modules[i // 3][i % 3 + self.modules_count - 8 - 3] = mod + + for i in range(18): + mod = (not test and ((bits >> i) & 1) == 1) + self.modules[i % 3 + self.modules_count - 8 - 3][i // 3] = mod + + def setup_type_info(self, test, mask_pattern): + data = (self.error_correction << 3) | mask_pattern + bits = util.BCH_type_info(data) + + # vertical + for i in range(15): + + mod = (not test and ((bits >> i) & 1) == 1) + + if i < 6: + self.modules[i][8] = mod + elif i < 8: + self.modules[i + 1][8] = mod + else: + self.modules[self.modules_count - 15 + i][8] = mod + + # horizontal + for i in range(15): + + mod = (not test and ((bits >> i) & 1) == 1) + + if i < 8: + self.modules[8][self.modules_count - i - 1] = mod + elif i < 9: + self.modules[8][15 - i - 1 + 1] = mod + else: + self.modules[8][15 - i - 1] = mod + + # fixed module + self.modules[self.modules_count - 8][8] = (not test) + + def map_data(self, data, mask_pattern): + inc = -1 + row = self.modules_count - 1 + bitIndex = 7 + byteIndex = 0 + + mask_func = util.mask_func(mask_pattern) + + data_len = len(data) + + for col in six.moves.xrange(self.modules_count - 1, 0, -2): + + if col <= 6: + col -= 1 + + col_range = (col, col-1) + + while True: + + for c in col_range: + + if self.modules[row][c] is None: + + dark = False + + if byteIndex < data_len: + dark = (((data[byteIndex] >> bitIndex) & 1) == 1) + + if mask_func(row, c): + dark = not dark + + self.modules[row][c] = dark + bitIndex -= 1 + + if bitIndex == -1: + byteIndex += 1 + bitIndex = 7 + + row += inc + + if row < 0 or self.modules_count <= row: + row -= inc + inc = -inc + break + + def get_matrix(self): + """ + Return the QR Code as a multidimensonal array, including the border. + + To return the array without a border, set ``self.border`` to 0 first. 
+        """
+        if self.data_cache is None:
+            self.make()
+
+        if not self.border:
+            return self.modules
+
+        width = len(self.modules) + self.border*2
+        code = [[False]*width] * self.border
+        x_border = [False]*self.border
+        for module in self.modules:
+            code.append(x_border + module + x_border)
+        code += [[False]*width] * self.border
+
+        return code
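Because get_matrix returns plain booleans, it is handy for custom renderers; a short sketch (payload is arbitrary):

    qr = QRCode(border=1)
    qr.add_data('hello')
    matrix = qr.get_matrix()   # list of rows of booleans, border included
    print(len(matrix))

diff --git a/server/www/packages/packages-windows/x86/qrcode/release.py b/server/www/packages/packages-windows/x86/qrcode/release.py
new file mode 100644
index 0000000..abbabb4
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/qrcode/release.py
@@ -0,0 +1,42 @@
+"""
+This file provides zest.releaser entry points used when releasing new
+qrcode versions.
+"""
+import os
+import re
+import datetime
+
+
+def update_manpage(data):
+    """
+    Update the version in the manpage document.
+    """
+    if data['name'] != 'qrcode':
+        print('no qrcode')
+        return
+
+    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+    filename = os.path.join(base_dir, 'doc', 'qr.1')
+    with open(filename, 'r') as f:
+        lines = f.readlines()
+
+    changed = False
+    for i, line in enumerate(lines):
+        if not line.startswith('.TH '):
+            continue
+        parts = re.split(r'"([^"]*)"', line)
+        if len(parts) < 5:
+            continue
+        changed = parts[3] != data['new_version']
+        if changed:
+            # Update version
+            parts[3] = data['new_version']
+            # Update date
+            parts[1] = datetime.datetime.now().strftime('%-d %b %Y')
+            lines[i] = '"'.join(parts)
+        break
+
+    if changed:
+        with open(filename, 'w') as f:
+            for line in lines:
+                f.write(line)
diff --git a/server/www/packages/packages-windows/x86/qrcode/util.py b/server/www/packages/packages-windows/x86/qrcode/util.py
new file mode 100644
index 0000000..a9652f7
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/qrcode/util.py
@@ -0,0 +1,590 @@
+import re
+import math
+
+import six
+from six.moves import xrange
+
+from qrcode import base, exceptions, LUT
+
+# QR encoding modes.
+MODE_NUMBER = 1 << 0
+MODE_ALPHA_NUM = 1 << 1
+MODE_8BIT_BYTE = 1 << 2
+MODE_KANJI = 1 << 3
+
+# Encoding mode sizes.
+MODE_SIZE_SMALL = {
+    MODE_NUMBER: 10,
+    MODE_ALPHA_NUM: 9,
+    MODE_8BIT_BYTE: 8,
+    MODE_KANJI: 8,
+}
+MODE_SIZE_MEDIUM = {
+    MODE_NUMBER: 12,
+    MODE_ALPHA_NUM: 11,
+    MODE_8BIT_BYTE: 16,
+    MODE_KANJI: 10,
+}
+MODE_SIZE_LARGE = {
+    MODE_NUMBER: 14,
+    MODE_ALPHA_NUM: 13,
+    MODE_8BIT_BYTE: 16,
+    MODE_KANJI: 12,
+}
+
+ALPHA_NUM = six.b('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:')
+RE_ALPHA_NUM = re.compile(six.b('^[') + re.escape(ALPHA_NUM) + six.b(']*\Z'))
+
+# The number of bits for numeric delimited data lengths.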
+NUMBER_LENGTH = {3: 10, 2: 7, 1: 4} + +PATTERN_POSITION_TABLE = [ + [], + [6, 18], + [6, 22], + [6, 26], + [6, 30], + [6, 34], + [6, 22, 38], + [6, 24, 42], + [6, 26, 46], + [6, 28, 50], + [6, 30, 54], + [6, 32, 58], + [6, 34, 62], + [6, 26, 46, 66], + [6, 26, 48, 70], + [6, 26, 50, 74], + [6, 30, 54, 78], + [6, 30, 56, 82], + [6, 30, 58, 86], + [6, 34, 62, 90], + [6, 28, 50, 72, 94], + [6, 26, 50, 74, 98], + [6, 30, 54, 78, 102], + [6, 28, 54, 80, 106], + [6, 32, 58, 84, 110], + [6, 30, 58, 86, 114], + [6, 34, 62, 90, 118], + [6, 26, 50, 74, 98, 122], + [6, 30, 54, 78, 102, 126], + [6, 26, 52, 78, 104, 130], + [6, 30, 56, 82, 108, 134], + [6, 34, 60, 86, 112, 138], + [6, 30, 58, 86, 114, 142], + [6, 34, 62, 90, 118, 146], + [6, 30, 54, 78, 102, 126, 150], + [6, 24, 50, 76, 102, 128, 154], + [6, 28, 54, 80, 106, 132, 158], + [6, 32, 58, 84, 110, 136, 162], + [6, 26, 54, 82, 110, 138, 166], + [6, 30, 58, 86, 114, 142, 170] +] + +G15 = ( + (1 << 10) | (1 << 8) | (1 << 5) | (1 << 4) | (1 << 2) | (1 << 1) | + (1 << 0)) +G18 = ( + (1 << 12) | (1 << 11) | (1 << 10) | (1 << 9) | (1 << 8) | (1 << 5) | + (1 << 2) | (1 << 0)) +G15_MASK = (1 << 14) | (1 << 12) | (1 << 10) | (1 << 4) | (1 << 1) + +PAD0 = 0xEC +PAD1 = 0x11 + +# Precompute bit count limits, indexed by error correction level and code size +_data_count = lambda block: block.data_count +BIT_LIMIT_TABLE = [ + [0] + [8*sum(map(_data_count, base.rs_blocks(version, error_correction))) + for version in xrange(1, 41)] + for error_correction in xrange(4) +] + + +def BCH_type_info(data): + d = data << 10 + while BCH_digit(d) - BCH_digit(G15) >= 0: + d ^= (G15 << (BCH_digit(d) - BCH_digit(G15))) + + return ((data << 10) | d) ^ G15_MASK + + +def BCH_type_number(data): + d = data << 12 + while BCH_digit(d) - BCH_digit(G18) >= 0: + d ^= (G18 << (BCH_digit(d) - BCH_digit(G18))) + return (data << 12) | d + + +def BCH_digit(data): + digit = 0 + while data != 0: + digit += 1 + data >>= 1 + return digit + + +def pattern_position(version): + return PATTERN_POSITION_TABLE[version - 1] + + +def mask_func(pattern): + """ + Return the mask function for the given mask pattern. 
+ """ + if pattern == 0: # 000 + return lambda i, j: (i + j) % 2 == 0 + if pattern == 1: # 001 + return lambda i, j: i % 2 == 0 + if pattern == 2: # 010 + return lambda i, j: j % 3 == 0 + if pattern == 3: # 011 + return lambda i, j: (i + j) % 3 == 0 + if pattern == 4: # 100 + return lambda i, j: (math.floor(i / 2) + math.floor(j / 3)) % 2 == 0 + if pattern == 5: # 101 + return lambda i, j: (i * j) % 2 + (i * j) % 3 == 0 + if pattern == 6: # 110 + return lambda i, j: ((i * j) % 2 + (i * j) % 3) % 2 == 0 + if pattern == 7: # 111 + return lambda i, j: ((i * j) % 3 + (i + j) % 2) % 2 == 0 + raise TypeError("Bad mask pattern: " + pattern) # pragma: no cover + + +def mode_sizes_for_version(version): + if version < 10: + return MODE_SIZE_SMALL + elif version < 27: + return MODE_SIZE_MEDIUM + else: + return MODE_SIZE_LARGE + + +def length_in_bits(mode, version): + if mode not in ( + MODE_NUMBER, MODE_ALPHA_NUM, MODE_8BIT_BYTE, MODE_KANJI): + raise TypeError("Invalid mode (%s)" % mode) # pragma: no cover + + if version < 1 or version > 40: # pragma: no cover + raise ValueError( + "Invalid version (was %s, expected 1 to 40)" % version) + + return mode_sizes_for_version(version)[mode] + + +def lost_point(modules): + modules_count = len(modules) + + lost_point = 0 + + lost_point = _lost_point_level1(modules, modules_count) + lost_point += _lost_point_level2(modules, modules_count) + lost_point += _lost_point_level3(modules, modules_count) + lost_point += _lost_point_level4(modules, modules_count) + + return lost_point + + +def _lost_point_level1(modules, modules_count): + lost_point = 0 + + modules_range = xrange(modules_count) + container = [0] * (modules_count + 1) + + for row in modules_range: + this_row = modules[row] + previous_color = this_row[0] + length = 0 + for col in modules_range: + if this_row[col] == previous_color: + length += 1 + else: + if length >= 5: + container[length] += 1 + length = 1 + previous_color = this_row[col] + if length >= 5: + container[length] += 1 + + for col in modules_range: + previous_color = modules[0][col] + length = 0 + for row in modules_range: + if modules[row][col] == previous_color: + length += 1 + else: + if length >= 5: + container[length] += 1 + length = 1 + previous_color = modules[row][col] + if length >= 5: + container[length] += 1 + + lost_point += sum(container[each_length] * (each_length - 2) + for each_length in xrange(5, modules_count + 1)) + + return lost_point + + +def _lost_point_level2(modules, modules_count): + lost_point = 0 + + modules_range = xrange(modules_count - 1) + for row in modules_range: + this_row = modules[row] + next_row = modules[row + 1] + # use iter() and next() to skip next four-block. e.g. + # d a f if top-right a != b botton-right, + # c b e then both abcd and abef won't lost any point. + modules_range_iter = iter(modules_range) + for col in modules_range_iter: + top_right = this_row[col + 1] + if top_right != next_row[col + 1]: + # reduce 33.3% of runtime via next(). + # None: raise nothing if there is no next item. + next(modules_range_iter, None) + elif top_right != this_row[col]: + continue + elif top_right != next_row[col]: + continue + else: + lost_point += 3 + + return lost_point + + +def _lost_point_level3(modules, modules_count): + # 1 : 1 : 3 : 1 : 1 ratio (dark:light:dark:light:dark) pattern in + # row/column, preceded or followed by light area 4 modules wide. From ISOIEC. 
+ # pattern1: 10111010000 + # pattern2: 00001011101 + modules_range = xrange(modules_count) + modules_range_short = xrange(modules_count-10) + lost_point = 0 + + for row in modules_range: + this_row = modules[row] + modules_range_short_iter = iter(modules_range_short) + col = 0 + for col in modules_range_short_iter: + if ( + not this_row[col + 1] + and this_row[col + 4] + and not this_row[col + 5] + and this_row[col + 6] + and not this_row[col + 9] + and ( + this_row[col + 0] + and this_row[col + 2] + and this_row[col + 3] + and not this_row[col + 7] + and not this_row[col + 8] + and not this_row[col + 10] + or + not this_row[col + 0] + and not this_row[col + 2] + and not this_row[col + 3] + and this_row[col + 7] + and this_row[col + 8] + and this_row[col + 10] + ) + ): + lost_point += 40 +# horspool algorithm. +# if this_row[col + 10] == True, pattern1 shift 4, pattern2 shift 2. So min=2. +# if this_row[col + 10] == False, pattern1 shift 1, pattern2 shift 1. So min=1. + if this_row[col + 10]: + next(modules_range_short_iter, None) + + for col in modules_range: + modules_range_short_iter = iter(modules_range_short) + row = 0 + for row in modules_range_short_iter: + if ( + not modules[row + 1][col] + and modules[row + 4][col] + and not modules[row + 5][col] + and modules[row + 6][col] + and not modules[row + 9][col] + and ( + modules[row + 0][col] + and modules[row + 2][col] + and modules[row + 3][col] + and not modules[row + 7][col] + and not modules[row + 8][col] + and not modules[row + 10][col] + or + not modules[row + 0][col] + and not modules[row + 2][col] + and not modules[row + 3][col] + and modules[row + 7][col] + and modules[row + 8][col] + and modules[row + 10][col] + ) + ): + lost_point += 40 + if modules[row + 10][col]: + next(modules_range_short_iter, None) + + return lost_point + + +def _lost_point_level4(modules, modules_count): + dark_count = sum(map(sum, modules)) + percent = float(dark_count) / (modules_count**2) + # Every 5% departure from 50%, rating++ + rating = int(abs(percent * 100 - 50) / 5) + return rating * 10 + + +def optimal_data_chunks(data, minimum=4): + """ + An iterator returning QRData chunks optimized to the data content. + + :param minimum: The minimum number of bytes in a row to split as a chunk. + """ + data = to_bytestring(data) + re_repeat = ( + six.b('{') + six.text_type(minimum).encode('ascii') + six.b(',}')) + num_pattern = re.compile(six.b('\d') + re_repeat) + num_bits = _optimal_split(data, num_pattern) + alpha_pattern = re.compile( + six.b('[') + re.escape(ALPHA_NUM) + six.b(']') + re_repeat) + for is_num, chunk in num_bits: + if is_num: + yield QRData(chunk, mode=MODE_NUMBER, check_data=False) + else: + for is_alpha, sub_chunk in _optimal_split(chunk, alpha_pattern): + if is_alpha: + mode = MODE_ALPHA_NUM + else: + mode = MODE_8BIT_BYTE + yield QRData(sub_chunk, mode=mode, check_data=False) + + +def _optimal_split(data, pattern): + while data: + match = re.search(pattern, data) + if not match: + break + start, end = match.start(), match.end() + if start: + yield False, data[:start] + yield True, data[start:end] + data = data[end:] + if data: + yield False, data + + +def to_bytestring(data): + """ + Convert data to a (utf-8 encoded) byte-string if it isn't a byte-string + already. + """ + if not isinstance(data, six.binary_type): + data = six.text_type(data).encode('utf-8') + return data + + +def optimal_mode(data): + """ + Calculate the optimal mode for this chunk of data. 
+ """ + if data.isdigit(): + return MODE_NUMBER + if RE_ALPHA_NUM.match(data): + return MODE_ALPHA_NUM + return MODE_8BIT_BYTE + + +class QRData: + """ + Data held in a QR compatible format. + + Doesn't currently handle KANJI. + """ + + def __init__(self, data, mode=None, check_data=True): + """ + If ``mode`` isn't provided, the most compact QR data type possible is + chosen. + """ + if check_data: + data = to_bytestring(data) + + if mode is None: + self.mode = optimal_mode(data) + else: + self.mode = mode + if mode not in (MODE_NUMBER, MODE_ALPHA_NUM, MODE_8BIT_BYTE): + raise TypeError("Invalid mode (%s)" % mode) # pragma: no cover + if check_data and mode < optimal_mode(data): # pragma: no cover + raise ValueError( + "Provided data can not be represented in mode " + "{0}".format(mode)) + + self.data = data + + def __len__(self): + return len(self.data) + + def write(self, buffer): + if self.mode == MODE_NUMBER: + for i in xrange(0, len(self.data), 3): + chars = self.data[i:i + 3] + bit_length = NUMBER_LENGTH[len(chars)] + buffer.put(int(chars), bit_length) + elif self.mode == MODE_ALPHA_NUM: + for i in xrange(0, len(self.data), 2): + chars = self.data[i:i + 2] + if len(chars) > 1: + buffer.put( + ALPHA_NUM.find(chars[0]) * 45 + + ALPHA_NUM.find(chars[1]), 11) + else: + buffer.put(ALPHA_NUM.find(chars), 6) + else: + if six.PY3: + # Iterating a bytestring in Python 3 returns an integer, + # no need to ord(). + data = self.data + else: + data = [ord(c) for c in self.data] + for c in data: + buffer.put(c, 8) + + def __repr__(self): + return repr(self.data) + + +class BitBuffer: + + def __init__(self): + self.buffer = [] + self.length = 0 + + def __repr__(self): + return ".".join([str(n) for n in self.buffer]) + + def get(self, index): + buf_index = math.floor(index / 8) + return ((self.buffer[buf_index] >> (7 - index % 8)) & 1) == 1 + + def put(self, num, length): + for i in range(length): + self.put_bit(((num >> (length - i - 1)) & 1) == 1) + + def __len__(self): + return self.length + + def put_bit(self, bit): + buf_index = self.length // 8 + if len(self.buffer) <= buf_index: + self.buffer.append(0) + if bit: + self.buffer[buf_index] |= (0x80 >> (self.length % 8)) + self.length += 1 + + +def create_bytes(buffer, rs_blocks): + offset = 0 + + maxDcCount = 0 + maxEcCount = 0 + + dcdata = [0] * len(rs_blocks) + ecdata = [0] * len(rs_blocks) + + for r in range(len(rs_blocks)): + + dcCount = rs_blocks[r].data_count + ecCount = rs_blocks[r].total_count - dcCount + + maxDcCount = max(maxDcCount, dcCount) + maxEcCount = max(maxEcCount, ecCount) + + dcdata[r] = [0] * dcCount + + for i in range(len(dcdata[r])): + dcdata[r][i] = 0xff & buffer.buffer[i + offset] + offset += dcCount + + # Get error correction polynomial. 
+ if ecCount in LUT.rsPoly_LUT: + rsPoly = base.Polynomial(LUT.rsPoly_LUT[ecCount], 0) + else: + rsPoly = base.Polynomial([1], 0) + for i in range(ecCount): + rsPoly = rsPoly * base.Polynomial([1, base.gexp(i)], 0) + + rawPoly = base.Polynomial(dcdata[r], len(rsPoly) - 1) + + modPoly = rawPoly % rsPoly + ecdata[r] = [0] * (len(rsPoly) - 1) + for i in range(len(ecdata[r])): + modIndex = i + len(modPoly) - len(ecdata[r]) + if (modIndex >= 0): + ecdata[r][i] = modPoly[modIndex] + else: + ecdata[r][i] = 0 + + totalCodeCount = 0 + for rs_block in rs_blocks: + totalCodeCount += rs_block.total_count + + data = [None] * totalCodeCount + index = 0 + + for i in range(maxDcCount): + for r in range(len(rs_blocks)): + if i < len(dcdata[r]): + data[index] = dcdata[r][i] + index += 1 + + for i in range(maxEcCount): + for r in range(len(rs_blocks)): + if i < len(ecdata[r]): + data[index] = ecdata[r][i] + index += 1 + + return data + + +def create_data(version, error_correction, data_list): + + buffer = BitBuffer() + for data in data_list: + buffer.put(data.mode, 4) + buffer.put(len(data), length_in_bits(data.mode, version)) + data.write(buffer) + + # Calculate the maximum number of bits for the given version. + rs_blocks = base.rs_blocks(version, error_correction) + bit_limit = 0 + for block in rs_blocks: + bit_limit += block.data_count * 8 + + if len(buffer) > bit_limit: + raise exceptions.DataOverflowError( + "Code length overflow. Data size (%s) > size available (%s)" % + (len(buffer), bit_limit)) + + # Terminate the bits (add up to four 0s). + for i in range(min(bit_limit - len(buffer), 4)): + buffer.put_bit(False) + + # Delimit the string into 8-bit words, padding with 0s if necessary. + delimit = len(buffer) % 8 + if delimit: + for i in range(8 - delimit): + buffer.put_bit(False) + + # Add special alternating padding bitstrings until buffer is full. + bytes_to_fill = (bit_limit - len(buffer)) // 8 + for i in range(bytes_to_fill): + if i % 2 == 0: + buffer.put(PAD0, 8) + else: + buffer.put(PAD1, 8) + + return create_bytes(buffer, rs_blocks) diff --git a/server/www/packages/packages-windows/x86/six.py b/server/www/packages/packages-windows/x86/six.py new file mode 100644 index 0000000..6bf4fd3 --- /dev/null +++ b/server/www/packages/packages-windows/x86/six.py @@ -0,0 +1,891 @@ +# Copyright (c) 2010-2017 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +"""Utilities for writing code that runs on Python 2 and 3""" + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.11.0" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. + delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." 
+ fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. + + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("getoutput", "commands", "subprocess"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", 
"http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." 
+ attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("splitvalue", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", 
"urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), + MovedAttribute("parse_http_list", "urllib2", "urllib.request"), + MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class 
Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over 
the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + try: + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + value = None + tb = None + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + try: + raise tp, value, tb + finally: + tb = None +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + try: + if from_value is None: + raise value + raise value from from_value + finally: + value = None +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + try: + raise value from from_value + finally: + value = None +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. 
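+            # (Only real ``file`` objects expose an ``encoding`` attribute;
+            # file-like objects such as StringIO skip this branch and
+            # receive the text unchanged.)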
+ if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(type): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. 
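+# Giving six a __path__ lets "six.moves.x" be imported as a submodule, so
+# those imports are routed through the _SixMetaPathImporter registered on
+# sys.meta_path below and resolved from its known_modules table.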
+__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/server/www/packages/packages-windows/x86/tornado/__init__.py b/server/www/packages/packages-windows/x86/tornado/__init__.py new file mode 100644 index 0000000..b269cf7 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/__init__.py @@ -0,0 +1,28 @@ +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The Tornado web server and tools.""" + +from __future__ import absolute_import, division, print_function + +# version is a human-readable version number. + +# version_info is a four-tuple for programmatic comparison. The first +# three numbers are the components of the version number. The fourth +# is zero for an official release, positive for a development branch, +# or negative for a release candidate or beta (after the base version +# number has been incremented) +version = "5.1.1" +version_info = (5, 1, 1, 0) diff --git a/server/www/packages/packages-windows/x86/tornado/_locale_data.py b/server/www/packages/packages-windows/x86/tornado/_locale_data.py new file mode 100644 index 0000000..a2c5039 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/_locale_data.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2012 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
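+# A minimal lookup sketch (keys and values are taken from the LOCALE_NAMES
+# table defined below):
+#
+#     from tornado._locale_data import LOCALE_NAMES
+#     LOCALE_NAMES["de_DE"]["name_en"]  # u"German"
+#     LOCALE_NAMES["de_DE"]["name"]     # u"Deutsch"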
+ +"""Data used by the tornado.locale module.""" + +from __future__ import absolute_import, division, print_function + +LOCALE_NAMES = { + "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"}, + "am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"}, + "ar_AR": {"name_en": u"Arabic", "name": u"العربية"}, + "bg_BG": {"name_en": u"Bulgarian", "name": u"Български"}, + "bn_IN": {"name_en": u"Bengali", "name": u"বাংলা"}, + "bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"}, + "ca_ES": {"name_en": u"Catalan", "name": u"Català"}, + "cs_CZ": {"name_en": u"Czech", "name": u"Čeština"}, + "cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"}, + "da_DK": {"name_en": u"Danish", "name": u"Dansk"}, + "de_DE": {"name_en": u"German", "name": u"Deutsch"}, + "el_GR": {"name_en": u"Greek", "name": u"Ελληνικά"}, + "en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"}, + "en_US": {"name_en": u"English (US)", "name": u"English (US)"}, + "es_ES": {"name_en": u"Spanish (Spain)", "name": u"Español (España)"}, + "es_LA": {"name_en": u"Spanish", "name": u"Español"}, + "et_EE": {"name_en": u"Estonian", "name": u"Eesti"}, + "eu_ES": {"name_en": u"Basque", "name": u"Euskara"}, + "fa_IR": {"name_en": u"Persian", "name": u"فارسی"}, + "fi_FI": {"name_en": u"Finnish", "name": u"Suomi"}, + "fr_CA": {"name_en": u"French (Canada)", "name": u"Français (Canada)"}, + "fr_FR": {"name_en": u"French", "name": u"Français"}, + "ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"}, + "gl_ES": {"name_en": u"Galician", "name": u"Galego"}, + "he_IL": {"name_en": u"Hebrew", "name": u"עברית"}, + "hi_IN": {"name_en": u"Hindi", "name": u"हिन्दी"}, + "hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"}, + "hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"}, + "id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"}, + "is_IS": {"name_en": u"Icelandic", "name": u"Íslenska"}, + "it_IT": {"name_en": u"Italian", "name": u"Italiano"}, + "ja_JP": {"name_en": u"Japanese", "name": u"日本語"}, + "ko_KR": {"name_en": u"Korean", "name": u"한국어"}, + "lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvių"}, + "lv_LV": {"name_en": u"Latvian", "name": u"Latviešu"}, + "mk_MK": {"name_en": u"Macedonian", "name": u"Македонски"}, + "ml_IN": {"name_en": u"Malayalam", "name": u"മലയാളം"}, + "ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"}, + "nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokmål)"}, + "nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"}, + "nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"}, + "pa_IN": {"name_en": u"Punjabi", "name": u"ਪੰਜਾਬੀ"}, + "pl_PL": {"name_en": u"Polish", "name": u"Polski"}, + "pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Português (Brasil)"}, + "pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Português (Portugal)"}, + "ro_RO": {"name_en": u"Romanian", "name": u"Română"}, + "ru_RU": {"name_en": u"Russian", "name": u"Русский"}, + "sk_SK": {"name_en": u"Slovak", "name": u"Slovenčina"}, + "sl_SI": {"name_en": u"Slovenian", "name": u"Slovenščina"}, + "sq_AL": {"name_en": u"Albanian", "name": u"Shqip"}, + "sr_RS": {"name_en": u"Serbian", "name": u"Српски"}, + "sv_SE": {"name_en": u"Swedish", "name": u"Svenska"}, + "sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"}, + "ta_IN": {"name_en": u"Tamil", "name": u"தமிழ்"}, + "te_IN": {"name_en": u"Telugu", "name": u"తెలుగు"}, + "th_TH": {"name_en": u"Thai", "name": u"ภาษาไทย"}, + "tl_PH": {"name_en": u"Filipino", "name": u"Filipino"}, + "tr_TR": {"name_en": u"Turkish", "name": u"Türkçe"}, + "uk_UA": {"name_en": 
u"Ukrainian", "name": u"Українська"},
+    "vi_VN": {"name_en": u"Vietnamese", "name": u"Tiếng Việt"},
+    "zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"中文(简体)"},
+    "zh_TW": {"name_en": u"Chinese (Traditional)", "name": u"中文(繁體)"},
+}
diff --git a/server/www/packages/packages-windows/x86/tornado/auth.py b/server/www/packages/packages-windows/x86/tornado/auth.py
new file mode 100644
index 0000000..b79ad14
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/tornado/auth.py
@@ -0,0 +1,1236 @@
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""This module contains implementations of various third-party
+authentication schemes.
+
+All the classes in this file are class mixins designed to be used with
+the `tornado.web.RequestHandler` class. They are used in two ways:
+
+* On a login handler, use methods such as ``authenticate_redirect()``,
+  ``authorize_redirect()``, and ``get_authenticated_user()`` to
+  establish the user's identity and store authentication tokens to your
+  database and/or cookies.
+* In non-login handlers, use methods such as ``facebook_request()``
+  or ``twitter_request()`` to use the authentication tokens to make
+  requests to the respective services.
+
+They all take slightly different arguments due to the fact all these
+services implement authentication and authorization slightly differently.
+See the individual service classes below for complete documentation.
+
+Example usage for Google OAuth:
+
+.. testcode::
+
+    class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
+                                   tornado.auth.GoogleOAuth2Mixin):
+        async def get(self):
+            if self.get_argument('code', False):
+                user = await self.get_authenticated_user(
+                    redirect_uri='http://your.site.com/auth/google',
+                    code=self.get_argument('code'))
+                # Save the user with e.g. set_secure_cookie
+            else:
+                await self.authorize_redirect(
+                    redirect_uri='http://your.site.com/auth/google',
+                    client_id=self.settings['google_oauth']['key'],
+                    scope=['profile', 'email'],
+                    response_type='code',
+                    extra_params={'approval_prompt': 'auto'})
+
+.. testoutput::
+   :hide:
+
+
+.. versionchanged:: 4.0
+   All of the callback interfaces in this module are now guaranteed
+   to run their callback with an argument of ``None`` on error.
+   Previously some functions would do this while others would simply
+   terminate the request on their own. This change also ensures that
+   errors are more consistently reported through the ``Future`` interfaces.
+""" + +from __future__ import absolute_import, division, print_function + +import base64 +import binascii +import functools +import hashlib +import hmac +import time +import uuid +import warnings + +from tornado.concurrent import (Future, _non_deprecated_return_future, + future_set_exc_info, chain_future, + future_set_result_unless_cancelled) +from tornado import gen +from tornado import httpclient +from tornado import escape +from tornado.httputil import url_concat +from tornado.log import gen_log +from tornado.stack_context import ExceptionStackContext, wrap +from tornado.util import unicode_type, ArgReplacer, PY3 + +if PY3: + import urllib.parse as urlparse + import urllib.parse as urllib_parse + long = int +else: + import urlparse + import urllib as urllib_parse + + +class AuthError(Exception): + pass + + +def _auth_future_to_callback(callback, future): + try: + result = future.result() + except AuthError as e: + gen_log.warning(str(e)) + result = None + callback(result) + + +def _auth_return_future(f): + """Similar to tornado.concurrent.return_future, but uses the auth + module's legacy callback interface. + + Note that when using this decorator the ``callback`` parameter + inside the function will actually be a future. + + .. deprecated:: 5.1 + Will be removed in 6.0. + """ + replacer = ArgReplacer(f, 'callback') + + @functools.wraps(f) + def wrapper(*args, **kwargs): + future = Future() + callback, args, kwargs = replacer.replace(future, args, kwargs) + if callback is not None: + warnings.warn("callback arguments are deprecated, use the returned Future instead", + DeprecationWarning) + future.add_done_callback( + wrap(functools.partial(_auth_future_to_callback, callback))) + + def handle_exception(typ, value, tb): + if future.done(): + return False + else: + future_set_exc_info(future, (typ, value, tb)) + return True + with ExceptionStackContext(handle_exception, delay_warning=True): + f(*args, **kwargs) + return future + return wrapper + + +class OpenIdMixin(object): + """Abstract implementation of OpenID and Attribute Exchange. + + Class attributes: + + * ``_OPENID_ENDPOINT``: the identity provider's URI. + """ + @_non_deprecated_return_future + def authenticate_redirect(self, callback_uri=None, + ax_attrs=["name", "email", "language", "username"], + callback=None): + """Redirects to the authentication URL for this service. + + After authentication, the service will redirect back to the given + callback URI with additional parameters including ``openid.mode``. + + We request the given attributes for the authenticated user by + default (name, email, language, and username). If you don't need + all those attributes for your app, you can request fewer with + the ax_attrs keyword argument. + + .. versionchanged:: 3.1 + Returns a `.Future` and takes an optional callback. These are + not strictly necessary as this method is synchronous, + but they are supplied for consistency with + `OAuthMixin.authorize_redirect`. + + .. deprecated:: 5.1 + + The ``callback`` argument and returned awaitable will be removed + in Tornado 6.0; this will be an ordinary synchronous function. + """ + callback_uri = callback_uri or self.request.uri + args = self._openid_args(callback_uri, ax_attrs=ax_attrs) + self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args)) + callback() + + @_auth_return_future + def get_authenticated_user(self, callback, http_client=None): + """Fetches the authenticated user data upon redirect. 
+ + This method should be called by the handler that receives the + redirect from the `authenticate_redirect()` method (which is + often the same as the one that calls it; in that case you would + call `get_authenticated_user` if the ``openid.mode`` parameter + is present and `authenticate_redirect` if it is not). + + The result of this method will generally be used to set a cookie. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. + """ + # Verify the OpenID response via direct request to the OP + args = dict((k, v[-1]) for k, v in self.request.arguments.items()) + args["openid.mode"] = u"check_authentication" + url = self._OPENID_ENDPOINT + if http_client is None: + http_client = self.get_auth_http_client() + fut = http_client.fetch(url, method="POST", body=urllib_parse.urlencode(args)) + fut.add_done_callback(wrap(functools.partial( + self._on_authentication_verified, callback))) + + def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None): + url = urlparse.urljoin(self.request.full_url(), callback_uri) + args = { + "openid.ns": "http://specs.openid.net/auth/2.0", + "openid.claimed_id": + "http://specs.openid.net/auth/2.0/identifier_select", + "openid.identity": + "http://specs.openid.net/auth/2.0/identifier_select", + "openid.return_to": url, + "openid.realm": urlparse.urljoin(url, '/'), + "openid.mode": "checkid_setup", + } + if ax_attrs: + args.update({ + "openid.ns.ax": "http://openid.net/srv/ax/1.0", + "openid.ax.mode": "fetch_request", + }) + ax_attrs = set(ax_attrs) + required = [] + if "name" in ax_attrs: + ax_attrs -= set(["name", "firstname", "fullname", "lastname"]) + required += ["firstname", "fullname", "lastname"] + args.update({ + "openid.ax.type.firstname": + "http://axschema.org/namePerson/first", + "openid.ax.type.fullname": + "http://axschema.org/namePerson", + "openid.ax.type.lastname": + "http://axschema.org/namePerson/last", + }) + known_attrs = { + "email": "http://axschema.org/contact/email", + "language": "http://axschema.org/pref/language", + "username": "http://axschema.org/namePerson/friendly", + } + for name in ax_attrs: + args["openid.ax.type." + name] = known_attrs[name] + required.append(name) + args["openid.ax.required"] = ",".join(required) + if oauth_scope: + args.update({ + "openid.ns.oauth": + "http://specs.openid.net/extensions/oauth/1.0", + "openid.oauth.consumer": self.request.host.split(":")[0], + "openid.oauth.scope": oauth_scope, + }) + return args + + def _on_authentication_verified(self, future, response_fut): + try: + response = response_fut.result() + except Exception as e: + future.set_exception(AuthError( + "Error response %s" % e)) + return + if b"is_valid:true" not in response.body: + future.set_exception(AuthError( + "Invalid OpenID response: %s" % response.body)) + return + + # Make sure we got back at least an email from attribute exchange + ax_ns = None + for name in self.request.arguments: + if name.startswith("openid.ns.") and \ + self.get_argument(name) == u"http://openid.net/srv/ax/1.0": + ax_ns = name[10:] + break + + def get_ax_arg(uri): + if not ax_ns: + return u"" + prefix = "openid." + ax_ns + ".type." + ax_name = None + for name in self.request.arguments.keys(): + if self.get_argument(name) == uri and name.startswith(prefix): + part = name[len(prefix):] + ax_name = "openid." + ax_ns + ".value." 
+ part + break + if not ax_name: + return u"" + return self.get_argument(ax_name, u"") + + email = get_ax_arg("http://axschema.org/contact/email") + name = get_ax_arg("http://axschema.org/namePerson") + first_name = get_ax_arg("http://axschema.org/namePerson/first") + last_name = get_ax_arg("http://axschema.org/namePerson/last") + username = get_ax_arg("http://axschema.org/namePerson/friendly") + locale = get_ax_arg("http://axschema.org/pref/language").lower() + user = dict() + name_parts = [] + if first_name: + user["first_name"] = first_name + name_parts.append(first_name) + if last_name: + user["last_name"] = last_name + name_parts.append(last_name) + if name: + user["name"] = name + elif name_parts: + user["name"] = u" ".join(name_parts) + elif email: + user["name"] = email.split("@")[0] + if email: + user["email"] = email + if locale: + user["locale"] = locale + if username: + user["username"] = username + claimed_id = self.get_argument("openid.claimed_id", None) + if claimed_id: + user["claimed_id"] = claimed_id + future_set_result_unless_cancelled(future, user) + + def get_auth_http_client(self): + """Returns the `.AsyncHTTPClient` instance to be used for auth requests. + + May be overridden by subclasses to use an HTTP client other than + the default. + """ + return httpclient.AsyncHTTPClient() + + +class OAuthMixin(object): + """Abstract implementation of OAuth 1.0 and 1.0a. + + See `TwitterMixin` below for an example implementation. + + Class attributes: + + * ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url. + * ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url. + * ``_OAUTH_VERSION``: May be either "1.0" or "1.0a". + * ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires + advance registration of callbacks. + + Subclasses must also override the `_oauth_get_user_future` and + `_oauth_consumer_token` methods. + """ + @_non_deprecated_return_future + def authorize_redirect(self, callback_uri=None, extra_params=None, + http_client=None, callback=None): + """Redirects the user to obtain OAuth authorization for this service. + + The ``callback_uri`` may be omitted if you have previously + registered a callback URI with the third-party service. For + some services, you must use a previously-registered callback + URI and cannot specify a callback via this method. + + This method sets a cookie called ``_oauth_request_token`` which is + subsequently used (and cleared) in `get_authenticated_user` for + security purposes. + + This method is asynchronous and must be called with ``await`` + or ``yield`` (This is different from other ``auth*_redirect`` + methods defined in this module). It calls + `.RequestHandler.finish` for you so you should not write any + other response after it returns. + + .. versionchanged:: 3.1 + Now returns a `.Future` and takes an optional callback, for + compatibility with `.gen.coroutine`. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. 
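+
+        A minimal call site, in an ``async def get`` login handler (the
+        handler wiring around it is illustrative, not part of this API)::
+
+            await self.authorize_redirect()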
+ + """ + if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False): + raise Exception("This service does not support oauth_callback") + if http_client is None: + http_client = self.get_auth_http_client() + if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": + fut = http_client.fetch( + self._oauth_request_token_url(callback_uri=callback_uri, + extra_params=extra_params)) + fut.add_done_callback(wrap(functools.partial( + self._on_request_token, + self._OAUTH_AUTHORIZE_URL, + callback_uri, + callback))) + else: + fut = http_client.fetch(self._oauth_request_token_url()) + fut.add_done_callback( + wrap(functools.partial( + self._on_request_token, self._OAUTH_AUTHORIZE_URL, + callback_uri, + callback))) + + @_auth_return_future + def get_authenticated_user(self, callback, http_client=None): + """Gets the OAuth authorized user and access token. + + This method should be called from the handler for your + OAuth callback URL to complete the registration process. We run the + callback with the authenticated user dictionary. This dictionary + will contain an ``access_key`` which can be used to make authorized + requests to this service on behalf of the user. The dictionary will + also contain other fields such as ``name``, depending on the service + used. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. + """ + future = callback + request_key = escape.utf8(self.get_argument("oauth_token")) + oauth_verifier = self.get_argument("oauth_verifier", None) + request_cookie = self.get_cookie("_oauth_request_token") + if not request_cookie: + future.set_exception(AuthError( + "Missing OAuth request token cookie")) + return + self.clear_cookie("_oauth_request_token") + cookie_key, cookie_secret = [ + base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")] + if cookie_key != request_key: + future.set_exception(AuthError( + "Request token does not match cookie")) + return + token = dict(key=cookie_key, secret=cookie_secret) + if oauth_verifier: + token["verifier"] = oauth_verifier + if http_client is None: + http_client = self.get_auth_http_client() + fut = http_client.fetch(self._oauth_access_token_url(token)) + fut.add_done_callback(wrap(functools.partial(self._on_access_token, callback))) + + def _oauth_request_token_url(self, callback_uri=None, extra_params=None): + consumer_token = self._oauth_consumer_token() + url = self._OAUTH_REQUEST_TOKEN_URL + args = dict( + oauth_consumer_key=escape.to_basestring(consumer_token["key"]), + oauth_signature_method="HMAC-SHA1", + oauth_timestamp=str(int(time.time())), + oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), + oauth_version="1.0", + ) + if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": + if callback_uri == "oob": + args["oauth_callback"] = "oob" + elif callback_uri: + args["oauth_callback"] = urlparse.urljoin( + self.request.full_url(), callback_uri) + if extra_params: + args.update(extra_params) + signature = _oauth10a_signature(consumer_token, "GET", url, args) + else: + signature = _oauth_signature(consumer_token, "GET", url, args) + + args["oauth_signature"] = signature + return url + "?" 
+ urllib_parse.urlencode(args) + + def _on_request_token(self, authorize_url, callback_uri, callback, + response_fut): + try: + response = response_fut.result() + except Exception as e: + raise Exception("Could not get request token: %s" % e) + request_token = _oauth_parse_response(response.body) + data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" + + base64.b64encode(escape.utf8(request_token["secret"]))) + self.set_cookie("_oauth_request_token", data) + args = dict(oauth_token=request_token["key"]) + if callback_uri == "oob": + self.finish(authorize_url + "?" + urllib_parse.urlencode(args)) + callback() + return + elif callback_uri: + args["oauth_callback"] = urlparse.urljoin( + self.request.full_url(), callback_uri) + self.redirect(authorize_url + "?" + urllib_parse.urlencode(args)) + callback() + + def _oauth_access_token_url(self, request_token): + consumer_token = self._oauth_consumer_token() + url = self._OAUTH_ACCESS_TOKEN_URL + args = dict( + oauth_consumer_key=escape.to_basestring(consumer_token["key"]), + oauth_token=escape.to_basestring(request_token["key"]), + oauth_signature_method="HMAC-SHA1", + oauth_timestamp=str(int(time.time())), + oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), + oauth_version="1.0", + ) + if "verifier" in request_token: + args["oauth_verifier"] = request_token["verifier"] + + if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": + signature = _oauth10a_signature(consumer_token, "GET", url, args, + request_token) + else: + signature = _oauth_signature(consumer_token, "GET", url, args, + request_token) + + args["oauth_signature"] = signature + return url + "?" + urllib_parse.urlencode(args) + + def _on_access_token(self, future, response_fut): + try: + response = response_fut.result() + except Exception: + future.set_exception(AuthError("Could not fetch access token")) + return + + access_token = _oauth_parse_response(response.body) + fut = self._oauth_get_user_future(access_token) + fut = gen.convert_yielded(fut) + fut.add_done_callback( + wrap(functools.partial(self._on_oauth_get_user, access_token, future))) + + def _oauth_consumer_token(self): + """Subclasses must override this to return their OAuth consumer keys. + + The return value should be a `dict` with keys ``key`` and ``secret``. + """ + raise NotImplementedError() + + @_non_deprecated_return_future + def _oauth_get_user_future(self, access_token, callback): + """Subclasses must override this to get basic information about the + user. + + Should return a `.Future` whose result is a dictionary + containing information about the user, which may have been + retrieved by using ``access_token`` to make a request to the + service. + + The access token will be added to the returned dictionary to make + the result of `get_authenticated_user`. + + For backwards compatibility, the callback-based ``_oauth_get_user`` + method is also supported. + + .. versionchanged:: 5.1 + + Subclasses may also define this method with ``async def``. + + .. deprecated:: 5.1 + + The ``_oauth_get_user`` fallback is deprecated and support for it + will be removed in 6.0. + """ + warnings.warn("_oauth_get_user is deprecated, override _oauth_get_user_future instead", + DeprecationWarning) + # By default, call the old-style _oauth_get_user, but new code + # should override this method instead. 
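+        # ``callback`` here is the resolver installed by the
+        # ``@_non_deprecated_return_future`` decorator, so results passed to
+        # it complete the Future that callers of this method are awaiting.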
+ self._oauth_get_user(access_token, callback) + + def _oauth_get_user(self, access_token, callback): + raise NotImplementedError() + + def _on_oauth_get_user(self, access_token, future, user_future): + if user_future.exception() is not None: + future.set_exception(user_future.exception()) + return + user = user_future.result() + if not user: + future.set_exception(AuthError("Error getting user")) + return + user["access_token"] = access_token + future_set_result_unless_cancelled(future, user) + + def _oauth_request_parameters(self, url, access_token, parameters={}, + method="GET"): + """Returns the OAuth parameters as a dict for the given request. + + parameters should include all POST arguments and query string arguments + that will be sent with the request. + """ + consumer_token = self._oauth_consumer_token() + base_args = dict( + oauth_consumer_key=escape.to_basestring(consumer_token["key"]), + oauth_token=escape.to_basestring(access_token["key"]), + oauth_signature_method="HMAC-SHA1", + oauth_timestamp=str(int(time.time())), + oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), + oauth_version="1.0", + ) + args = {} + args.update(base_args) + args.update(parameters) + if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": + signature = _oauth10a_signature(consumer_token, method, url, args, + access_token) + else: + signature = _oauth_signature(consumer_token, method, url, args, + access_token) + base_args["oauth_signature"] = escape.to_basestring(signature) + return base_args + + def get_auth_http_client(self): + """Returns the `.AsyncHTTPClient` instance to be used for auth requests. + + May be overridden by subclasses to use an HTTP client other than + the default. + """ + return httpclient.AsyncHTTPClient() + + +class OAuth2Mixin(object): + """Abstract implementation of OAuth 2.0. + + See `FacebookGraphMixin` or `GoogleOAuth2Mixin` below for example + implementations. + + Class attributes: + + * ``_OAUTH_AUTHORIZE_URL``: The service's authorization url. + * ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url. + """ + @_non_deprecated_return_future + def authorize_redirect(self, redirect_uri=None, client_id=None, + client_secret=None, extra_params=None, + callback=None, scope=None, response_type="code"): + """Redirects the user to obtain OAuth authorization for this service. + + Some providers require that you register a redirect URL with + your application instead of passing one via this method. You + should call this method to log the user in, and then call + ``get_authenticated_user`` in the handler for your + redirect URL to complete the authorization process. + + .. versionchanged:: 3.1 + Returns a `.Future` and takes an optional callback. These are + not strictly necessary as this method is synchronous, + but they are supplied for consistency with + `OAuthMixin.authorize_redirect`. + + .. deprecated:: 5.1 + + The ``callback`` argument and returned awaitable will be removed + in Tornado 6.0; this will be an ordinary synchronous function. 
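+
+        A minimal call, mirroring the module-level Google example (URLs and
+        settings keys are illustrative)::
+
+            await self.authorize_redirect(
+                redirect_uri='http://your.site.com/auth/provider',
+                client_id=self.settings['provider_oauth']['key'],
+                scope=['profile', 'email'],
+                response_type='code')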
+        """
+        args = {
+            "redirect_uri": redirect_uri,
+            "client_id": client_id,
+            "response_type": response_type
+        }
+        if extra_params:
+            args.update(extra_params)
+        if scope:
+            args['scope'] = ' '.join(scope)
+        self.redirect(
+            url_concat(self._OAUTH_AUTHORIZE_URL, args))
+        callback()
+
+    def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
+                                 client_secret=None, code=None,
+                                 extra_params=None):
+        url = self._OAUTH_ACCESS_TOKEN_URL
+        args = dict(
+            redirect_uri=redirect_uri,
+            code=code,
+            client_id=client_id,
+            client_secret=client_secret,
+        )
+        if extra_params:
+            args.update(extra_params)
+        return url_concat(url, args)
+
+    @_auth_return_future
+    def oauth2_request(self, url, callback, access_token=None,
+                       post_args=None, **args):
+        """Fetches the given URL with an OAuth2 access token.
+
+        If the request is a POST, ``post_args`` should be provided. Query
+        string arguments should be given as keyword arguments.
+
+        Example usage:
+
+        .. testcode::
+
+            class MainHandler(tornado.web.RequestHandler,
+                              tornado.auth.FacebookGraphMixin):
+                @tornado.web.authenticated
+                async def get(self):
+                    new_entry = await self.oauth2_request(
+                        "https://graph.facebook.com/me/feed",
+                        post_args={"message": "I am posting from my Tornado application!"},
+                        access_token=self.current_user["access_token"])
+
+                    if not new_entry:
+                        # Call failed; perhaps missing permission?
+                        await self.authorize_redirect()
+                        return
+                    self.finish("Posted a message!")
+
+        .. testoutput::
+           :hide:
+
+        .. versionadded:: 4.3
+
+        .. deprecated:: 5.1
+
+           The ``callback`` argument is deprecated and will be removed in 6.0.
+           Use the returned awaitable object instead.
+        """
+        all_args = {}
+        if access_token:
+            all_args["access_token"] = access_token
+        all_args.update(args)
+
+        if all_args:
+            url += "?" + urllib_parse.urlencode(all_args)
+        callback = wrap(functools.partial(self._on_oauth2_request, callback))
+        http = self.get_auth_http_client()
+        if post_args is not None:
+            fut = http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args))
+        else:
+            fut = http.fetch(url)
+        fut.add_done_callback(callback)
+
+    def _on_oauth2_request(self, future, response_fut):
+        try:
+            response = response_fut.result()
+        except Exception as e:
+            future.set_exception(AuthError("Error response %s" % e))
+            return
+
+        future_set_result_unless_cancelled(future, escape.json_decode(response.body))
+
+    def get_auth_http_client(self):
+        """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
+
+        May be overridden by subclasses to use an HTTP client other than
+        the default.
+
+        .. versionadded:: 4.3
+        """
+        return httpclient.AsyncHTTPClient()
+
+
+class TwitterMixin(OAuthMixin):
+    """Twitter OAuth authentication.
+
+    To authenticate with Twitter, register your application with
+    Twitter at http://twitter.com/apps. Then copy your Consumer Key
+    and Consumer Secret to the application
+    `~tornado.web.Application.settings` ``twitter_consumer_key`` and
+    ``twitter_consumer_secret``. Use this mixin on the handler for the
+    URL you registered as your application's callback URL.
+
+    When your application is set up, you can use this mixin like this
+    to authenticate the user with Twitter and get access to their stream:
+
+    .. testcode::
+
+        class TwitterLoginHandler(tornado.web.RequestHandler,
+                                  tornado.auth.TwitterMixin):
+            async def get(self):
+                if self.get_argument("oauth_token", None):
+                    user = await self.get_authenticated_user()
+                    # Save the user using e.g. set_secure_cookie()
+                else:
+                    await self.authorize_redirect()
+
+    .. 
testoutput::
+       :hide:
+
+    The user object returned by `~OAuthMixin.get_authenticated_user`
+    includes the attributes ``username``, ``name``, ``access_token``,
+    and all of the custom Twitter user attributes described at
+    https://dev.twitter.com/docs/api/1.1/get/users/show
+    """
+    _OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
+    _OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
+    _OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
+    _OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate"
+    _OAUTH_NO_CALLBACKS = False
+    _TWITTER_BASE_URL = "https://api.twitter.com/1.1"
+
+    @_non_deprecated_return_future
+    def authenticate_redirect(self, callback_uri=None, callback=None):
+        """Just like `~OAuthMixin.authorize_redirect`, but
+        auto-redirects if authorized.
+
+        This is generally the right interface to use if you are using
+        Twitter for single-sign on.
+
+        .. versionchanged:: 3.1
+           Now returns a `.Future` and takes an optional callback, for
+           compatibility with `.gen.coroutine`.
+
+        .. deprecated:: 5.1
+
+           The ``callback`` argument is deprecated and will be removed in 6.0.
+           Use the returned awaitable object instead.
+        """
+        http = self.get_auth_http_client()
+        fut = http.fetch(self._oauth_request_token_url(callback_uri=callback_uri))
+        fut.add_done_callback(wrap(functools.partial(
+            self._on_request_token, self._OAUTH_AUTHENTICATE_URL,
+            None, callback)))
+
+    @_auth_return_future
+    def twitter_request(self, path, callback=None, access_token=None,
+                        post_args=None, **args):
+        """Fetches the given API path, e.g., ``statuses/user_timeline/btaylor``
+
+        The path should not include the format or API version number.
+        (we automatically use JSON format and API version 1).
+
+        If the request is a POST, ``post_args`` should be provided. Query
+        string arguments should be given as keyword arguments.
+
+        All the Twitter methods are documented at http://dev.twitter.com/
+
+        Many methods require an OAuth access token which you can
+        obtain through `~OAuthMixin.authorize_redirect` and
+        `~OAuthMixin.get_authenticated_user`. The user returned through that
+        process includes an 'access_token' attribute that can be used
+        to make authenticated requests via this method. Example
+        usage:
+
+        .. testcode::
+
+            class MainHandler(tornado.web.RequestHandler,
+                              tornado.auth.TwitterMixin):
+                @tornado.web.authenticated
+                async def get(self):
+                    new_entry = await self.twitter_request(
+                        "/statuses/update",
+                        post_args={"status": "Testing Tornado Web Server"},
+                        access_token=self.current_user["access_token"])
+                    if not new_entry:
+                        # Call failed; perhaps missing permission?
+                        await self.authorize_redirect()
+                        return
+                    self.finish("Posted a message!")
+
+        .. testoutput::
+           :hide:
+
+        .. deprecated:: 5.1
+
+           The ``callback`` argument is deprecated and will be removed in 6.0.
+           Use the returned awaitable object instead.
+        """
+        if path.startswith('http:') or path.startswith('https:'):
+            # Raw urls are useful for e.g. search which doesn't follow the
+            # usual pattern: http://search.twitter.com/search.json
+            url = path
+        else:
+            url = self._TWITTER_BASE_URL + path + ".json"
+        # Add the OAuth resource request signature if we have credentials
+        if access_token:
+            all_args = {}
+            all_args.update(args)
+            all_args.update(post_args or {})
+            method = "POST" if post_args is not None else "GET"
+            oauth = self._oauth_request_parameters(
+                url, access_token, all_args, method=method)
+            args.update(oauth)
+        if args:
+            url += "?" 
+ urllib_parse.urlencode(args) + http = self.get_auth_http_client() + http_callback = wrap(functools.partial(self._on_twitter_request, callback, url)) + if post_args is not None: + fut = http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args)) + else: + fut = http.fetch(url) + fut.add_done_callback(http_callback) + + def _on_twitter_request(self, future, url, response_fut): + try: + response = response_fut.result() + except Exception as e: + future.set_exception(AuthError( + "Error response %s fetching %s" % (e, url))) + return + future_set_result_unless_cancelled(future, escape.json_decode(response.body)) + + def _oauth_consumer_token(self): + self.require_setting("twitter_consumer_key", "Twitter OAuth") + self.require_setting("twitter_consumer_secret", "Twitter OAuth") + return dict( + key=self.settings["twitter_consumer_key"], + secret=self.settings["twitter_consumer_secret"]) + + @gen.coroutine + def _oauth_get_user_future(self, access_token): + user = yield self.twitter_request( + "/account/verify_credentials", + access_token=access_token) + if user: + user["username"] = user["screen_name"] + raise gen.Return(user) + + +class GoogleOAuth2Mixin(OAuth2Mixin): + """Google authentication using OAuth2. + + In order to use, register your application with Google and copy the + relevant parameters to your application settings. + + * Go to the Google Dev Console at http://console.developers.google.com + * Select a project, or create a new one. + * In the sidebar on the left, select APIs & Auth. + * In the list of APIs, find the Google+ API service and set it to ON. + * In the sidebar on the left, select Credentials. + * In the OAuth section of the page, select Create New Client ID. + * Set the Redirect URI to point to your auth handler + * Copy the "Client secret" and "Client ID" to the application settings as + {"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}} + + .. versionadded:: 3.2 + """ + _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/v2/auth" + _OAUTH_ACCESS_TOKEN_URL = "https://www.googleapis.com/oauth2/v4/token" + _OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo" + _OAUTH_NO_CALLBACKS = False + _OAUTH_SETTINGS_KEY = 'google_oauth' + + @_auth_return_future + def get_authenticated_user(self, redirect_uri, code, callback): + """Handles the login for the Google user, returning an access token. + + The result is a dictionary containing an ``access_token`` field + ([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)). + Unlike other ``get_authenticated_user`` methods in this package, + this method does not return any additional information about the user. + The returned access token can be used with `OAuth2Mixin.oauth2_request` + to request additional information (perhaps from + ``https://www.googleapis.com/oauth2/v2/userinfo``) + + Example usage: + + .. testcode:: + + class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, + tornado.auth.GoogleOAuth2Mixin): + async def get(self): + if self.get_argument('code', False): + access = await self.get_authenticated_user( + redirect_uri='http://your.site.com/auth/google', + code=self.get_argument('code')) + user = await self.oauth2_request( + "https://www.googleapis.com/oauth2/v1/userinfo", + access_token=access["access_token"]) + # Save the user and access token with + # e.g. set_secure_cookie. 
+                    else:
+                        await self.authorize_redirect(
+                            redirect_uri='http://your.site.com/auth/google',
+                            client_id=self.settings['google_oauth']['key'],
+                            scope=['profile', 'email'],
+                            response_type='code',
+                            extra_params={'approval_prompt': 'auto'})
+
+        .. testoutput::
+           :hide:
+
+        .. deprecated:: 5.1
+
+           The ``callback`` argument is deprecated and will be removed in 6.0.
+           Use the returned awaitable object instead.
+        """  # noqa: E501
+        http = self.get_auth_http_client()
+        body = urllib_parse.urlencode({
+            "redirect_uri": redirect_uri,
+            "code": code,
+            "client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'],
+            "client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'],
+            "grant_type": "authorization_code",
+        })
+
+        fut = http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
+                         method="POST",
+                         headers={'Content-Type': 'application/x-www-form-urlencoded'},
+                         body=body)
+        fut.add_done_callback(wrap(functools.partial(self._on_access_token, callback)))
+
+    def _on_access_token(self, future, response_fut):
+        """Callback function for the access token exchange."""
+        try:
+            response = response_fut.result()
+        except Exception as e:
+            future.set_exception(AuthError('Google auth error: %s' % str(e)))
+            return
+
+        args = escape.json_decode(response.body)
+        future_set_result_unless_cancelled(future, args)
+
+
+class FacebookGraphMixin(OAuth2Mixin):
+    """Facebook authentication using the new Graph API and OAuth2."""
+    _OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
+    _OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?"
+    _OAUTH_NO_CALLBACKS = False
+    _FACEBOOK_BASE_URL = "https://graph.facebook.com"
+
+    @_auth_return_future
+    def get_authenticated_user(self, redirect_uri, client_id, client_secret,
+                               code, callback, extra_fields=None):
+        """Handles the login for the Facebook user, returning a user object.
+
+        Example usage:
+
+        .. testcode::
+
+            class FacebookGraphLoginHandler(tornado.web.RequestHandler,
+                                            tornado.auth.FacebookGraphMixin):
+                async def get(self):
+                    if self.get_argument("code", False):
+                        user = await self.get_authenticated_user(
+                            redirect_uri='/auth/facebookgraph/',
+                            client_id=self.settings["facebook_api_key"],
+                            client_secret=self.settings["facebook_secret"],
+                            code=self.get_argument("code"))
+                        # Save the user with e.g. set_secure_cookie
+                    else:
+                        await self.authorize_redirect(
+                            redirect_uri='/auth/facebookgraph/',
+                            client_id=self.settings["facebook_api_key"],
+                            extra_params={"scope": "read_stream,offline_access"})
+
+        .. testoutput::
+           :hide:
+
+        This method returns a dictionary which may contain the following fields:
+
+        * ``access_token``, a string which may be passed to `facebook_request`
+        * ``session_expires``, an integer encoded as a string representing
+          the time until the access token expires in seconds. This field should
+          be used like ``int(user['session_expires'])``; in a future version of
+          Tornado it will change from a string to an integer.
+        * ``id``, ``name``, ``first_name``, ``last_name``, ``locale``, ``picture``,
+          ``link``, plus any fields named in the ``extra_fields`` argument. These
+          fields are copied from the Facebook graph API
+          `user object <https://developers.facebook.com/docs/graph-api/reference/user>`_
+
+        .. versionchanged:: 4.5
+           The ``session_expires`` field was updated to support changes made to the
+           Facebook API in March 2017.
+
+        .. deprecated:: 5.1
+
+           The ``callback`` argument is deprecated and will be removed in 6.0.
+           Use the returned awaitable object instead.
+ """ + http = self.get_auth_http_client() + args = { + "redirect_uri": redirect_uri, + "code": code, + "client_id": client_id, + "client_secret": client_secret, + } + + fields = set(['id', 'name', 'first_name', 'last_name', + 'locale', 'picture', 'link']) + if extra_fields: + fields.update(extra_fields) + + fut = http.fetch(self._oauth_request_token_url(**args)) + fut.add_done_callback(wrap(functools.partial(self._on_access_token, redirect_uri, client_id, + client_secret, callback, fields))) + + @gen.coroutine + def _on_access_token(self, redirect_uri, client_id, client_secret, + future, fields, response_fut): + try: + response = response_fut.result() + except Exception as e: + future.set_exception(AuthError('Facebook auth error: %s' % str(e))) + return + + args = escape.json_decode(response.body) + session = { + "access_token": args.get("access_token"), + "expires_in": args.get("expires_in") + } + + user = yield self.facebook_request( + path="/me", + access_token=session["access_token"], + appsecret_proof=hmac.new(key=client_secret.encode('utf8'), + msg=session["access_token"].encode('utf8'), + digestmod=hashlib.sha256).hexdigest(), + fields=",".join(fields) + ) + + if user is None: + future_set_result_unless_cancelled(future, None) + return + + fieldmap = {} + for field in fields: + fieldmap[field] = user.get(field) + + # session_expires is converted to str for compatibility with + # older versions in which the server used url-encoding and + # this code simply returned the string verbatim. + # This should change in Tornado 5.0. + fieldmap.update({"access_token": session["access_token"], + "session_expires": str(session.get("expires_in"))}) + future_set_result_unless_cancelled(future, fieldmap) + + @_auth_return_future + def facebook_request(self, path, callback, access_token=None, + post_args=None, **args): + """Fetches the given relative API path, e.g., "/btaylor/picture" + + If the request is a POST, ``post_args`` should be provided. Query + string arguments should be given as keyword arguments. + + An introduction to the Facebook Graph API can be found at + http://developers.facebook.com/docs/api + + Many methods require an OAuth access token which you can + obtain through `~OAuth2Mixin.authorize_redirect` and + `get_authenticated_user`. The user returned through that + process includes an ``access_token`` attribute that can be + used to make authenticated requests via this method. + + Example usage: + + .. testcode:: + + class MainHandler(tornado.web.RequestHandler, + tornado.auth.FacebookGraphMixin): + @tornado.web.authenticated + async def get(self): + new_entry = await self.facebook_request( + "/me/feed", + post_args={"message": "I am posting from my Tornado application!"}, + access_token=self.current_user["access_token"]) + + if not new_entry: + # Call failed; perhaps missing permission? + yield self.authorize_redirect() + return + self.finish("Posted a message!") + + .. testoutput:: + :hide: + + The given path is relative to ``self._FACEBOOK_BASE_URL``, + by default "https://graph.facebook.com". + + This method is a wrapper around `OAuth2Mixin.oauth2_request`; + the only difference is that this method takes a relative path, + while ``oauth2_request`` takes a complete url. + + .. versionchanged:: 3.1 + Added the ability to override ``self._FACEBOOK_BASE_URL``. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. 
+ """ + url = self._FACEBOOK_BASE_URL + path + # Thanks to the _auth_return_future decorator, our "callback" + # argument is a Future, which we cannot pass as a callback to + # oauth2_request. Instead, have oauth2_request return a + # future and chain them together. + oauth_future = self.oauth2_request(url, access_token=access_token, + post_args=post_args, **args) + chain_future(oauth_future, callback) + + +def _oauth_signature(consumer_token, method, url, parameters={}, token=None): + """Calculates the HMAC-SHA1 OAuth signature for the given request. + + See http://oauth.net/core/1.0/#signing_process + """ + parts = urlparse.urlparse(url) + scheme, netloc, path = parts[:3] + normalized_url = scheme.lower() + "://" + netloc.lower() + path + + base_elems = [] + base_elems.append(method.upper()) + base_elems.append(normalized_url) + base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v))) + for k, v in sorted(parameters.items()))) + base_string = "&".join(_oauth_escape(e) for e in base_elems) + + key_elems = [escape.utf8(consumer_token["secret"])] + key_elems.append(escape.utf8(token["secret"] if token else "")) + key = b"&".join(key_elems) + + hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1) + return binascii.b2a_base64(hash.digest())[:-1] + + +def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None): + """Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request. + + See http://oauth.net/core/1.0a/#signing_process + """ + parts = urlparse.urlparse(url) + scheme, netloc, path = parts[:3] + normalized_url = scheme.lower() + "://" + netloc.lower() + path + + base_elems = [] + base_elems.append(method.upper()) + base_elems.append(normalized_url) + base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v))) + for k, v in sorted(parameters.items()))) + + base_string = "&".join(_oauth_escape(e) for e in base_elems) + key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))] + key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else "")) + key = b"&".join(key_elems) + + hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1) + return binascii.b2a_base64(hash.digest())[:-1] + + +def _oauth_escape(val): + if isinstance(val, unicode_type): + val = val.encode("utf-8") + return urllib_parse.quote(val, safe="~") + + +def _oauth_parse_response(body): + # I can't find an officially-defined encoding for oauth responses and + # have never seen anyone use non-ascii. Leave the response in a byte + # string for python 2, and use utf8 on python 3. + body = escape.native_str(body) + p = urlparse.parse_qs(body, keep_blank_values=False) + token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0]) + + # Add the extra parameters the Provider included to the token + special = ("oauth_token", "oauth_token_secret") + token.update((k, p[k][0]) for k in p if k not in special) + return token diff --git a/server/www/packages/packages-windows/x86/tornado/autoreload.py b/server/www/packages/packages-windows/x86/tornado/autoreload.py new file mode 100644 index 0000000..7d69474 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/autoreload.py @@ -0,0 +1,356 @@ +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Automatically restart the server when a source file is modified. + +Most applications should not access this module directly. Instead, +pass the keyword argument ``autoreload=True`` to the +`tornado.web.Application` constructor (or ``debug=True``, which +enables this setting and several others). This will enable autoreload +mode as well as checking for changes to templates and static +resources. Note that restarting is a destructive operation and any +requests in progress will be aborted when the process restarts. (If +you want to disable autoreload while using other debug-mode features, +pass both ``debug=True`` and ``autoreload=False``). + +This module can also be used as a command-line wrapper around scripts +such as unit test runners. See the `main` method for details. + +The command-line wrapper and Application debug modes can be used together. +This combination is encouraged as the wrapper catches syntax errors and +other import-time failures, while debug mode catches changes once +the server has started. + +This module depends on `.IOLoop`, so it will not work in WSGI applications +and Google App Engine. It also will not work correctly when `.HTTPServer`'s +multi-process mode is used. + +Reloading loses any Python interpreter command-line arguments (e.g. ``-u``) +because it re-executes Python using ``sys.executable`` and ``sys.argv``. +Additionally, modifying these variables will cause reloading to behave +incorrectly. + +""" + +from __future__ import absolute_import, division, print_function + +import os +import sys + +# sys.path handling +# ----------------- +# +# If a module is run with "python -m", the current directory (i.e. "") +# is automatically prepended to sys.path, but not if it is run as +# "path/to/file.py". The processing for "-m" rewrites the former to +# the latter, so subsequent executions won't have the same path as the +# original. +# +# Conversely, when run as path/to/file.py, the directory containing +# file.py gets added to the path, which can cause confusion as imports +# may become relative in spite of the future import. +# +# We address the former problem by reconstructing the original command +# line (Python >= 3.4) or by setting the $PYTHONPATH environment +# variable (Python < 3.4) before re-execution so the new process will +# see the correct path. We attempt to address the latter problem when +# tornado.autoreload is run as __main__. + +if __name__ == "__main__": + # This sys.path manipulation must come before our imports (as much + # as possible - if we introduced a tornado.sys or tornado.os + # module we'd be in trouble), or else our imports would become + # relative again despite the future import. + # + # There is a separate __main__ block at the end of the file to call main(). 
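A minimal sketch of the Application-level usage the docstring above describes, assuming Tornado is installed; the handler name and port here are hypothetical, not part of this module:

    import tornado.ioloop
    import tornado.web

    class MainHandler(tornado.web.RequestHandler):  # hypothetical handler
        def get(self):
            self.write("Hello, world")

    if __name__ == "__main__":
        # debug=True enables autoreload together with the other debug-mode
        # features; pass autoreload=True instead to enable only reloading.
        app = tornado.web.Application([(r"/", MainHandler)], debug=True)
        app.listen(8888)
        tornado.ioloop.IOLoop.current().start()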
+ if sys.path[0] == os.path.dirname(__file__): + del sys.path[0] + +import functools +import logging +import os +import pkgutil # type: ignore +import sys +import traceback +import types +import subprocess +import weakref + +from tornado import ioloop +from tornado.log import gen_log +from tornado import process +from tornado.util import exec_in + +try: + import signal +except ImportError: + signal = None + +# os.execv is broken on Windows and can't properly parse command line +# arguments and executable name if they contain whitespaces. subprocess +# fixes that behavior. +_has_execv = sys.platform != 'win32' + +_watched_files = set() +_reload_hooks = [] +_reload_attempted = False +_io_loops = weakref.WeakKeyDictionary() # type: ignore +_autoreload_is_main = False +_original_argv = None +_original_spec = None + + +def start(check_time=500): + """Begins watching source files for changes. + + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + """ + io_loop = ioloop.IOLoop.current() + if io_loop in _io_loops: + return + _io_loops[io_loop] = True + if len(_io_loops) > 1: + gen_log.warning("tornado.autoreload started more than once in the same process") + modify_times = {} + callback = functools.partial(_reload_on_update, modify_times) + scheduler = ioloop.PeriodicCallback(callback, check_time) + scheduler.start() + + +def wait(): + """Wait for a watched file to change, then restart the process. + + Intended to be used at the end of scripts like unit test runners, + to run the tests again after any source file changes (but see also + the command-line interface in `main`) + """ + io_loop = ioloop.IOLoop() + io_loop.add_callback(start) + io_loop.start() + + +def watch(filename): + """Add a file to the watch list. + + All imported modules are watched by default. + """ + _watched_files.add(filename) + + +def add_reload_hook(fn): + """Add a function to be called before reloading the process. + + Note that for open file and socket handles it is generally + preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or + ``tornado.platform.auto.set_close_exec``) instead + of using a reload hook to close them. + """ + _reload_hooks.append(fn) + + +def _reload_on_update(modify_times): + if _reload_attempted: + # We already tried to reload and it didn't work, so don't try again. + return + if process.task_id() is not None: + # We're in a child process created by fork_processes. If child + # processes restarted themselves, they'd all restart and then + # all call fork_processes again. + return + for module in list(sys.modules.values()): + # Some modules play games with sys.modules (e.g. email/__init__.py + # in the standard library), and occasionally this can cause strange + # failures in getattr. Just ignore anything that's not an ordinary + # module. 
+ if not isinstance(module, types.ModuleType): + continue + path = getattr(module, "__file__", None) + if not path: + continue + if path.endswith(".pyc") or path.endswith(".pyo"): + path = path[:-1] + _check_file(modify_times, path) + for path in _watched_files: + _check_file(modify_times, path) + + +def _check_file(modify_times, path): + try: + modified = os.stat(path).st_mtime + except Exception: + return + if path not in modify_times: + modify_times[path] = modified + return + if modify_times[path] != modified: + gen_log.info("%s modified; restarting server", path) + _reload() + + +def _reload(): + global _reload_attempted + _reload_attempted = True + for fn in _reload_hooks: + fn() + if hasattr(signal, "setitimer"): + # Clear the alarm signal set by + # ioloop.set_blocking_log_threshold so it doesn't fire + # after the exec. + signal.setitimer(signal.ITIMER_REAL, 0, 0) + # sys.path fixes: see comments at top of file. If __main__.__spec__ + # exists, we were invoked with -m and the effective path is about to + # change on re-exec. Reconstruct the original command line to + # ensure that the new process sees the same path we did. If + # __spec__ is not available (Python < 3.4), check instead if + # sys.path[0] is an empty string and add the current directory to + # $PYTHONPATH. + if _autoreload_is_main: + spec = _original_spec + argv = _original_argv + else: + spec = getattr(sys.modules['__main__'], '__spec__', None) + argv = sys.argv + if spec: + argv = ['-m', spec.name] + argv[1:] + else: + path_prefix = '.' + os.pathsep + if (sys.path[0] == '' and + not os.environ.get("PYTHONPATH", "").startswith(path_prefix)): + os.environ["PYTHONPATH"] = (path_prefix + + os.environ.get("PYTHONPATH", "")) + if not _has_execv: + subprocess.Popen([sys.executable] + argv) + os._exit(0) + else: + try: + os.execv(sys.executable, [sys.executable] + argv) + except OSError: + # Mac OS X versions prior to 10.6 do not support execv in + # a process that contains multiple threads. Instead of + # re-executing in the current process, start a new one + # and cause the current process to exit. This isn't + # ideal since the new process is detached from the parent + # terminal and thus cannot easily be killed with ctrl-C, + # but it's better than not being able to autoreload at + # all. + # Unfortunately the errno returned in this case does not + # appear to be consistent, so we can't easily check for + # this error specifically. + os.spawnv(os.P_NOWAIT, sys.executable, [sys.executable] + argv) + # At this point the IOLoop has been closed and finally + # blocks will experience errors if we allow the stack to + # unwind, so just exit uncleanly. + os._exit(0) + + +_USAGE = """\ +Usage: + python -m tornado.autoreload -m module.to.run [args...] + python -m tornado.autoreload path/to/script.py [args...] +""" + + +def main(): + """Command-line wrapper to re-run a script whenever its source changes. + + Scripts may be specified by filename or module name:: + + python -m tornado.autoreload -m tornado.test.runtests + python -m tornado.autoreload tornado/test/runtests.py + + Running a script with this wrapper is similar to calling + `tornado.autoreload.wait` at the end of the script, but this wrapper + can catch import-time problems like syntax errors that would otherwise + prevent the script from reaching its call to `wait`. + """ + # Remember that we were launched with autoreload as main. + # The main module can be tricky; set the variables both in our globals + # (which may be __main__) and the real importable version. 
+ import tornado.autoreload + global _autoreload_is_main + global _original_argv, _original_spec + tornado.autoreload._autoreload_is_main = _autoreload_is_main = True + original_argv = sys.argv + tornado.autoreload._original_argv = _original_argv = original_argv + original_spec = getattr(sys.modules['__main__'], '__spec__', None) + tornado.autoreload._original_spec = _original_spec = original_spec + sys.argv = sys.argv[:] + if len(sys.argv) >= 3 and sys.argv[1] == "-m": + mode = "module" + module = sys.argv[2] + del sys.argv[1:3] + elif len(sys.argv) >= 2: + mode = "script" + script = sys.argv[1] + sys.argv = sys.argv[1:] + else: + print(_USAGE, file=sys.stderr) + sys.exit(1) + + try: + if mode == "module": + import runpy + runpy.run_module(module, run_name="__main__", alter_sys=True) + elif mode == "script": + with open(script) as f: + # Execute the script in our namespace instead of creating + # a new one so that something that tries to import __main__ + # (e.g. the unittest module) will see names defined in the + # script instead of just those defined in this module. + global __file__ + __file__ = script + # If __package__ is defined, imports may be incorrectly + # interpreted as relative to this module. + global __package__ + del __package__ + exec_in(f.read(), globals(), globals()) + except SystemExit as e: + logging.basicConfig() + gen_log.info("Script exited with status %s", e.code) + except Exception as e: + logging.basicConfig() + gen_log.warning("Script exited with uncaught exception", exc_info=True) + # If an exception occurred at import time, the file with the error + # never made it into sys.modules and so we won't know to watch it. + # Just to make sure we've covered everything, walk the stack trace + # from the exception and watch every file. + for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]): + watch(filename) + if isinstance(e, SyntaxError): + # SyntaxErrors are special: their innermost stack frame is fake + # so extract_tb won't see it and we have to get the filename + # from the exception object. + watch(e.filename) + else: + logging.basicConfig() + gen_log.info("Script exited normally") + # restore sys.argv so subsequent executions will include autoreload + sys.argv = original_argv + + if mode == 'module': + # runpy did a fake import of the module as __main__, but now it's + # no longer in sys.modules. Figure out where it is and watch it. + loader = pkgutil.get_loader(module) + if loader is not None: + watch(loader.get_filename()) + + wait() + + +if __name__ == "__main__": + # See also the other __main__ block at the top of the file, which modifies + # sys.path before our imports + main() diff --git a/server/www/packages/packages-windows/x86/tornado/concurrent.py b/server/www/packages/packages-windows/x86/tornado/concurrent.py new file mode 100644 index 0000000..f7e6bcc --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/concurrent.py @@ -0,0 +1,660 @@ +# +# Copyright 2012 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
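The `main()` docstring above names two equivalent entry points: wrapping a script from the command line (``python -m tornado.autoreload -m mypackage.tests``) or calling `wait()` at the end of the script itself. A sketch of the latter; the module name ``mypackage.tests`` is a placeholder:

    import unittest
    import tornado.autoreload

    if __name__ == "__main__":
        # Run the suite once, then block until a watched source file
        # changes; at that point the process re-executes itself.
        suite = unittest.defaultTestLoader.loadTestsFromName("mypackage.tests")
        unittest.TextTestRunner().run(suite)
        tornado.autoreload.wait()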
+"""Utilities for working with ``Future`` objects. + +``Futures`` are a pattern for concurrent programming introduced in +Python 3.2 in the `concurrent.futures` package, and also adopted (in a +slightly different form) in Python 3.4's `asyncio` package. This +package defines a ``Future`` class that is an alias for `asyncio.Future` +when available, and a compatible implementation for older versions of +Python. It also includes some utility functions for interacting with +``Future`` objects. + +While this package is an important part of Tornado's internal +implementation, applications rarely need to interact with it +directly. +""" +from __future__ import absolute_import, division, print_function + +import functools +import platform +import textwrap +import traceback +import sys +import warnings + +from tornado.log import app_log +from tornado.stack_context import ExceptionStackContext, wrap +from tornado.util import raise_exc_info, ArgReplacer, is_finalizing + +try: + from concurrent import futures +except ImportError: + futures = None + +try: + import asyncio +except ImportError: + asyncio = None + +try: + import typing +except ImportError: + typing = None + + +# Can the garbage collector handle cycles that include __del__ methods? +# This is true in cpython beginning with version 3.4 (PEP 442). +_GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and + sys.version_info >= (3, 4)) + + +class ReturnValueIgnoredError(Exception): + pass + +# This class and associated code in the future object is derived +# from the Trollius project, a backport of asyncio to Python 2.x - 3.x + + +class _TracebackLogger(object): + """Helper to log a traceback upon destruction if not cleared. + + This solves a nasty problem with Futures and Tasks that have an + exception set: if nobody asks for the exception, the exception is + never logged. This violates the Zen of Python: 'Errors should + never pass silently. Unless explicitly silenced.' + + However, we don't want to log the exception as soon as + set_exception() is called: if the calling code is written + properly, it will get the exception and handle it properly. But + we *do* want to log it if result() or exception() was never called + -- otherwise developers waste a lot of time wondering why their + buggy code fails silently. + + An earlier attempt added a __del__() method to the Future class + itself, but this backfired because the presence of __del__() + prevents garbage collection from breaking cycles. A way out of + this catch-22 is to avoid having a __del__() method on the Future + class itself, but instead to have a reference to a helper object + with a __del__() method that logs the traceback, where we ensure + that the helper object doesn't participate in cycles, and only the + Future has a reference to it. + + The helper object is added when set_exception() is called. When + the Future is collected, and the helper is present, the helper + object is also collected, and its __del__() method will log the + traceback. When the Future's result() or exception() method is + called (and a helper object is present), it removes the the helper + object, after calling its clear() method to prevent it from + logging. + + One downside is that we do a fair amount of work to extract the + traceback from the exception, even when it is never logged. 
It
+    would seem cheaper to just store the exception object, but that
+    references the traceback, which references stack frames, which may
+    reference the Future, which references the _TracebackLogger, and
+    then the _TracebackLogger would be included in a cycle, which is
+    what we're trying to avoid! As an optimization, we don't
+    immediately format the exception; we only do the work when
+    activate() is called, a call that is delayed until after all the
+    Future's callbacks have run. Usually a Future has at least one
+    callback (typically set by 'yield From'), and that callback
+    extracts the result or exception, removing the need to format
+    the exception at all.
+
+    PS. I don't claim credit for this solution. I first heard of it
+    in a discussion about closing files when they are collected.
+    """
+
+    __slots__ = ('exc_info', 'formatted_tb')
+
+    def __init__(self, exc_info):
+        self.exc_info = exc_info
+        self.formatted_tb = None
+
+    def activate(self):
+        exc_info = self.exc_info
+        if exc_info is not None:
+            self.exc_info = None
+            self.formatted_tb = traceback.format_exception(*exc_info)
+
+    def clear(self):
+        self.exc_info = None
+        self.formatted_tb = None
+
+    def __del__(self, is_finalizing=is_finalizing):
+        if not is_finalizing() and self.formatted_tb:
+            app_log.error('Future exception was never retrieved: %s',
+                          ''.join(self.formatted_tb).rstrip())
+
+
+class Future(object):
+    """Placeholder for an asynchronous result.
+
+    A ``Future`` encapsulates the result of an asynchronous
+    operation. In synchronous applications ``Futures`` are used
+    to wait for the result from a thread or process pool; in
+    Tornado they are normally used with `.IOLoop.add_future` or by
+    yielding them in a `.gen.coroutine`.
+
+    `tornado.concurrent.Future` is an alias for `asyncio.Future` when
+    that package is available (Python 3.4+). Unlike
+    `concurrent.futures.Future`, the ``Futures`` used by Tornado and
+    `asyncio` are not thread-safe (and therefore faster for use with
+    single-threaded event loops).
+
+    In addition to ``exception`` and ``set_exception``, Tornado's
+    ``Future`` implementation supports storing an ``exc_info`` triple
+    to support better tracebacks on Python 2. To set an ``exc_info``
+    triple, use `future_set_exc_info`, and to retrieve one, call
+    `result()` (which will raise it).
+
+    .. versionchanged:: 4.0
+       `tornado.concurrent.Future` is always a thread-unsafe ``Future``
+       with support for the ``exc_info`` methods. Previously it would
+       be an alias for the thread-safe `concurrent.futures.Future`
+       if that package was available and fall back to the thread-unsafe
+       implementation if it was not.
+
+    .. versionchanged:: 4.1
+       If a `.Future` contains an error but that error is never observed
+       (by calling ``result()``, ``exception()``, or ``exc_info()``),
+       a stack trace will be logged when the `.Future` is garbage collected.
+       This normally indicates an error in the application, but in cases
+       where it results in undesired logging it may be necessary to
+       suppress the logging by ensuring that the exception is observed:
+       ``f.add_done_callback(lambda f: f.exception())``.
+
+    .. versionchanged:: 5.0
+
+       This class was previously available under the name
+       ``TracebackFuture``. This name, which was deprecated since
+       version 4.0, has been removed. When `asyncio` is available
+       ``tornado.concurrent.Future`` is now an alias for
+       `asyncio.Future`. Like `asyncio.Future`, callbacks are now
+       always scheduled on the `.IOLoop` and are never run
+       synchronously.
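A small sketch of how such a ``Future`` is typically produced and consumed; the function names here are illustrative, not part of Tornado, and on Python 3 with `asyncio` available this class is an alias for `asyncio.Future` as noted above:

    from tornado.concurrent import Future
    from tornado.ioloop import IOLoop

    def async_answer():
        # Resolve the future on a later IOLoop iteration instead of
        # synchronously, mirroring how Tornado schedules callbacks.
        fut = Future()
        IOLoop.current().add_callback(fut.set_result, 42)
        return fut

    async def main():
        print(await async_answer())  # prints 42

    IOLoop.current().run_sync(main)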
+ + """ + def __init__(self): + self._done = False + self._result = None + self._exc_info = None + + self._log_traceback = False # Used for Python >= 3.4 + self._tb_logger = None # Used for Python <= 3.3 + + self._callbacks = [] + + # Implement the Python 3.5 Awaitable protocol if possible + # (we can't use return and yield together until py33). + if sys.version_info >= (3, 3): + exec(textwrap.dedent(""" + def __await__(self): + return (yield self) + """)) + else: + # Py2-compatible version for use with cython. + def __await__(self): + result = yield self + # StopIteration doesn't take args before py33, + # but Cython recognizes the args tuple. + e = StopIteration() + e.args = (result,) + raise e + + def cancel(self): + """Cancel the operation, if possible. + + Tornado ``Futures`` do not support cancellation, so this method always + returns False. + """ + return False + + def cancelled(self): + """Returns True if the operation has been cancelled. + + Tornado ``Futures`` do not support cancellation, so this method + always returns False. + """ + return False + + def running(self): + """Returns True if this operation is currently running.""" + return not self._done + + def done(self): + """Returns True if the future has finished running.""" + return self._done + + def _clear_tb_log(self): + self._log_traceback = False + if self._tb_logger is not None: + self._tb_logger.clear() + self._tb_logger = None + + def result(self, timeout=None): + """If the operation succeeded, return its result. If it failed, + re-raise its exception. + + This method takes a ``timeout`` argument for compatibility with + `concurrent.futures.Future` but it is an error to call it + before the `Future` is done, so the ``timeout`` is never used. + """ + self._clear_tb_log() + if self._result is not None: + return self._result + if self._exc_info is not None: + try: + raise_exc_info(self._exc_info) + finally: + self = None + self._check_done() + return self._result + + def exception(self, timeout=None): + """If the operation raised an exception, return the `Exception` + object. Otherwise returns None. + + This method takes a ``timeout`` argument for compatibility with + `concurrent.futures.Future` but it is an error to call it + before the `Future` is done, so the ``timeout`` is never used. + """ + self._clear_tb_log() + if self._exc_info is not None: + return self._exc_info[1] + else: + self._check_done() + return None + + def add_done_callback(self, fn): + """Attaches the given callback to the `Future`. + + It will be invoked with the `Future` as its argument when the Future + has finished running and its result is available. In Tornado + consider using `.IOLoop.add_future` instead of calling + `add_done_callback` directly. + """ + if self._done: + from tornado.ioloop import IOLoop + IOLoop.current().add_callback(fn, self) + else: + self._callbacks.append(fn) + + def set_result(self, result): + """Sets the result of a ``Future``. + + It is undefined to call any of the ``set`` methods more than once + on the same object. + """ + self._result = result + self._set_done() + + def set_exception(self, exception): + """Sets the exception of a ``Future.``""" + self.set_exc_info( + (exception.__class__, + exception, + getattr(exception, '__traceback__', None))) + + def exc_info(self): + """Returns a tuple in the same format as `sys.exc_info` or None. + + .. 
versionadded:: 4.0 + """ + self._clear_tb_log() + return self._exc_info + + def set_exc_info(self, exc_info): + """Sets the exception information of a ``Future.`` + + Preserves tracebacks on Python 2. + + .. versionadded:: 4.0 + """ + self._exc_info = exc_info + self._log_traceback = True + if not _GC_CYCLE_FINALIZERS: + self._tb_logger = _TracebackLogger(exc_info) + + try: + self._set_done() + finally: + # Activate the logger after all callbacks have had a + # chance to call result() or exception(). + if self._log_traceback and self._tb_logger is not None: + self._tb_logger.activate() + self._exc_info = exc_info + + def _check_done(self): + if not self._done: + raise Exception("DummyFuture does not support blocking for results") + + def _set_done(self): + self._done = True + if self._callbacks: + from tornado.ioloop import IOLoop + loop = IOLoop.current() + for cb in self._callbacks: + loop.add_callback(cb, self) + self._callbacks = None + + # On Python 3.3 or older, objects with a destructor part of a reference + # cycle are never destroyed. It's no longer the case on Python 3.4 thanks to + # the PEP 442. + if _GC_CYCLE_FINALIZERS: + def __del__(self, is_finalizing=is_finalizing): + if is_finalizing() or not self._log_traceback: + # set_exception() was not called, or result() or exception() + # has consumed the exception + return + + tb = traceback.format_exception(*self._exc_info) + + app_log.error('Future %r exception was never retrieved: %s', + self, ''.join(tb).rstrip()) + + +if asyncio is not None: + Future = asyncio.Future # noqa + +if futures is None: + FUTURES = Future # type: typing.Union[type, typing.Tuple[type, ...]] +else: + FUTURES = (futures.Future, Future) + + +def is_future(x): + return isinstance(x, FUTURES) + + +class DummyExecutor(object): + def submit(self, fn, *args, **kwargs): + future = Future() + try: + future_set_result_unless_cancelled(future, fn(*args, **kwargs)) + except Exception: + future_set_exc_info(future, sys.exc_info()) + return future + + def shutdown(self, wait=True): + pass + + +dummy_executor = DummyExecutor() + + +def run_on_executor(*args, **kwargs): + """Decorator to run a synchronous method asynchronously on an executor. + + The decorated method may be called with a ``callback`` keyword + argument and returns a future. + + The executor to be used is determined by the ``executor`` + attributes of ``self``. To use a different attribute name, pass a + keyword argument to the decorator:: + + @run_on_executor(executor='_thread_pool') + def foo(self): + pass + + This decorator should not be confused with the similarly-named + `.IOLoop.run_in_executor`. In general, using ``run_in_executor`` + when *calling* a blocking method is recommended instead of using + this decorator when *defining* a method. If compatibility with older + versions of Tornado is required, consider defining an executor + and using ``executor.submit()`` at the call site. + + .. versionchanged:: 4.2 + Added keyword arguments to use alternative attributes. + + .. versionchanged:: 5.0 + Always uses the current IOLoop instead of ``self.io_loop``. + + .. versionchanged:: 5.1 + Returns a `.Future` compatible with ``await`` instead of a + `concurrent.futures.Future`. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in + 6.0. The decorator itself is discouraged in new code but will + not be removed in 6.0. 
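A runnable sketch of the decorator described above, using the default attribute name ``executor``; the class and method names are hypothetical:

    import concurrent.futures
    from tornado.concurrent import run_on_executor
    from tornado.ioloop import IOLoop

    class Worker(object):
        def __init__(self):
            # run_on_executor looks this attribute up by name.
            self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)

        @run_on_executor
        def slow_add(self, a, b):
            # Runs on a worker thread; the caller receives an awaitable Future.
            return a + b

    async def main():
        print(await Worker().slow_add(1, 2))  # prints 3

    IOLoop.current().run_sync(main)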
+ """ + def run_on_executor_decorator(fn): + executor = kwargs.get("executor", "executor") + + @functools.wraps(fn) + def wrapper(self, *args, **kwargs): + callback = kwargs.pop("callback", None) + async_future = Future() + conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs) + chain_future(conc_future, async_future) + if callback: + warnings.warn("callback arguments are deprecated, use the returned Future instead", + DeprecationWarning) + from tornado.ioloop import IOLoop + IOLoop.current().add_future( + async_future, lambda future: callback(future.result())) + return async_future + return wrapper + if args and kwargs: + raise ValueError("cannot combine positional and keyword args") + if len(args) == 1: + return run_on_executor_decorator(args[0]) + elif len(args) != 0: + raise ValueError("expected 1 argument, got %d", len(args)) + return run_on_executor_decorator + + +_NO_RESULT = object() + + +def return_future(f): + """Decorator to make a function that returns via callback return a + `Future`. + + This decorator was provided to ease the transition from + callback-oriented code to coroutines. It is not recommended for + new code. + + The wrapped function should take a ``callback`` keyword argument + and invoke it with one argument when it has finished. To signal failure, + the function can simply raise an exception (which will be + captured by the `.StackContext` and passed along to the ``Future``). + + From the caller's perspective, the callback argument is optional. + If one is given, it will be invoked when the function is complete + with ``Future.result()`` as an argument. If the function fails, the + callback will not be run and an exception will be raised into the + surrounding `.StackContext`. + + If no callback is given, the caller should use the ``Future`` to + wait for the function to complete (perhaps by yielding it in a + coroutine, or passing it to `.IOLoop.add_future`). + + Usage: + + .. testcode:: + + @return_future + def future_func(arg1, arg2, callback): + # Do stuff (possibly asynchronous) + callback(result) + + async def caller(): + await future_func(arg1, arg2) + + .. + + Note that ``@return_future`` and ``@gen.engine`` can be applied to the + same function, provided ``@return_future`` appears first. However, + consider using ``@gen.coroutine`` instead of this combination. + + .. versionchanged:: 5.1 + + Now raises a `.DeprecationWarning` if a callback argument is passed to + the decorated function and deprecation warnings are enabled. + + .. deprecated:: 5.1 + + This decorator will be removed in Tornado 6.0. New code should + use coroutines directly instead of wrapping callback-based code + with this decorator. Interactions with non-Tornado + callback-based code should be managed explicitly to avoid + relying on the `.ExceptionStackContext` built into this + decorator. + """ + warnings.warn("@return_future is deprecated, use coroutines instead", + DeprecationWarning) + return _non_deprecated_return_future(f, warn=True) + + +def _non_deprecated_return_future(f, warn=False): + # Allow auth.py to use this decorator without triggering + # deprecation warnings. This will go away once auth.py has removed + # its legacy interfaces in 6.0. 
+    replacer = ArgReplacer(f, 'callback')
+
+    @functools.wraps(f)
+    def wrapper(*args, **kwargs):
+        future = Future()
+        callback, args, kwargs = replacer.replace(
+            lambda value=_NO_RESULT: future_set_result_unless_cancelled(future, value),
+            args, kwargs)
+
+        def handle_error(typ, value, tb):
+            future_set_exc_info(future, (typ, value, tb))
+            return True
+        exc_info = None
+        esc = ExceptionStackContext(handle_error, delay_warning=True)
+        with esc:
+            if not warn:
+                # HACK: In non-deprecated mode (only used in auth.py),
+                # suppress the warning entirely. Since this is added
+                # in a 5.1 patch release and already removed in 6.0,
+                # I'm prioritizing a minimal change instead of a
+                # clean solution.
+                esc.delay_warning = False
+            try:
+                result = f(*args, **kwargs)
+                if result is not None:
+                    raise ReturnValueIgnoredError(
+                        "@return_future should not be used with functions "
+                        "that return values")
+            except:
+                exc_info = sys.exc_info()
+                raise
+        if exc_info is not None:
+            # If the initial synchronous part of f() raised an exception,
+            # go ahead and raise it to the caller directly without waiting
+            # for them to inspect the Future.
+            future.result()
+
+        # If the caller passed in a callback, schedule it to be called
+        # when the future resolves. It is important that this happens
+        # just before we return the future, or else we risk confusing
+        # stack contexts with multiple exceptions (one here with the
+        # immediate exception, and again when the future resolves and
+        # the callback triggers its exception by calling future.result()).
+        if callback is not None:
+            warnings.warn("callback arguments are deprecated, use the returned Future instead",
+                          DeprecationWarning)
+
+            def run_callback(future):
+                result = future.result()
+                if result is _NO_RESULT:
+                    callback()
+                else:
+                    callback(result)
+            future_add_done_callback(future, wrap(run_callback))
+        return future
+    return wrapper
+
+
+def chain_future(a, b):
+    """Chain two futures together so that when one completes, so does the other.
+
+    The result (success or failure) of ``a`` will be copied to ``b``, unless
+    ``b`` has already been completed or cancelled by the time ``a`` finishes.
+
+    .. versionchanged:: 5.0
+
+       Now accepts both Tornado/asyncio `Future` objects and
+       `concurrent.futures.Future`.
+
+    """
+    def copy(future):
+        assert future is a
+        if b.done():
+            return
+        if (hasattr(a, 'exc_info') and
+                a.exc_info() is not None):
+            future_set_exc_info(b, a.exc_info())
+        elif a.exception() is not None:
+            b.set_exception(a.exception())
+        else:
+            b.set_result(a.result())
+    if isinstance(a, Future):
+        future_add_done_callback(a, copy)
+    else:
+        # concurrent.futures.Future
+        from tornado.ioloop import IOLoop
+        IOLoop.current().add_future(a, copy)
+
+
+def future_set_result_unless_cancelled(future, value):
+    """Set the given ``value`` as the `Future`'s result, if not cancelled.
+
+    Avoids asyncio.InvalidStateError when calling set_result() on
+    a cancelled `asyncio.Future`.
+
+    .. versionadded:: 5.0
+    """
+    if not future.cancelled():
+        future.set_result(value)
+
+
+def future_set_exc_info(future, exc_info):
+    """Set the given ``exc_info`` as the `Future`'s exception.
+
+    Understands both `asyncio.Future` and Tornado's extensions to
+    enable better tracebacks on Python 2.
+
+    ..
versionadded:: 5.0 + """ + if hasattr(future, 'set_exc_info'): + # Tornado's Future + future.set_exc_info(exc_info) + else: + # asyncio.Future + future.set_exception(exc_info[1]) + + +def future_add_done_callback(future, callback): + """Arrange to call ``callback`` when ``future`` is complete. + + ``callback`` is invoked with one argument, the ``future``. + + If ``future`` is already done, ``callback`` is invoked immediately. + This may differ from the behavior of ``Future.add_done_callback``, + which makes no such guarantee. + + .. versionadded:: 5.0 + """ + if future.done(): + callback(future) + else: + future.add_done_callback(callback) diff --git a/server/www/packages/packages-windows/x86/tornado/curl_httpclient.py b/server/www/packages/packages-windows/x86/tornado/curl_httpclient.py new file mode 100644 index 0000000..7f5cb10 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/curl_httpclient.py @@ -0,0 +1,514 @@ +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Non-blocking HTTP client implementation using pycurl.""" + +from __future__ import absolute_import, division, print_function + +import collections +import functools +import logging +import pycurl # type: ignore +import threading +import time +from io import BytesIO + +from tornado import httputil +from tornado import ioloop +from tornado import stack_context + +from tornado.escape import utf8, native_str +from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main + +curl_log = logging.getLogger('tornado.curl_httpclient') + + +class CurlAsyncHTTPClient(AsyncHTTPClient): + def initialize(self, max_clients=10, defaults=None): + super(CurlAsyncHTTPClient, self).initialize(defaults=defaults) + self._multi = pycurl.CurlMulti() + self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout) + self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket) + self._curls = [self._curl_create() for i in range(max_clients)] + self._free_list = self._curls[:] + self._requests = collections.deque() + self._fds = {} + self._timeout = None + + # libcurl has bugs that sometimes cause it to not report all + # relevant file descriptors and timeouts to TIMERFUNCTION/ + # SOCKETFUNCTION. Mitigate the effects of such bugs by + # forcing a periodic scan of all active requests. + self._force_timeout_callback = ioloop.PeriodicCallback( + self._handle_force_timeout, 1000) + self._force_timeout_callback.start() + + # Work around a bug in libcurl 7.29.0: Some fields in the curl + # multi object are initialized lazily, and its destructor will + # segfault if it is destroyed without having been used. Add + # and remove a dummy handle to make sure everything is + # initialized. 
+        dummy_curl_handle = pycurl.Curl()
+        self._multi.add_handle(dummy_curl_handle)
+        self._multi.remove_handle(dummy_curl_handle)
+
+    def close(self):
+        self._force_timeout_callback.stop()
+        if self._timeout is not None:
+            self.io_loop.remove_timeout(self._timeout)
+        for curl in self._curls:
+            curl.close()
+        self._multi.close()
+        super(CurlAsyncHTTPClient, self).close()
+
+        # Set the properties below to None to reduce the reference count of
+        # the current instance, because they hold some of its methods and
+        # would otherwise cause circular references.
+        self._force_timeout_callback = None
+        self._multi = None
+
+    def fetch_impl(self, request, callback):
+        self._requests.append((request, callback, self.io_loop.time()))
+        self._process_queue()
+        self._set_timeout(0)
+
+    def _handle_socket(self, event, fd, multi, data):
+        """Called by libcurl when it wants to change the file descriptors
+        it cares about.
+        """
+        event_map = {
+            pycurl.POLL_NONE: ioloop.IOLoop.NONE,
+            pycurl.POLL_IN: ioloop.IOLoop.READ,
+            pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
+            pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE
+        }
+        if event == pycurl.POLL_REMOVE:
+            if fd in self._fds:
+                self.io_loop.remove_handler(fd)
+                del self._fds[fd]
+        else:
+            ioloop_event = event_map[event]
+            # libcurl sometimes closes a socket and then opens a new
+            # one using the same FD without giving us a POLL_NONE in
+            # between. This is a problem with the epoll IOLoop,
+            # because the kernel can tell when a socket is closed and
+            # removes it from the epoll automatically, causing future
+            # update_handler calls to fail. Since we can't tell when
+            # this has happened, always use remove and re-add
+            # instead of update.
+            if fd in self._fds:
+                self.io_loop.remove_handler(fd)
+            self.io_loop.add_handler(fd, self._handle_events,
+                                     ioloop_event)
+            self._fds[fd] = ioloop_event
+
+    def _set_timeout(self, msecs):
+        """Called by libcurl to schedule a timeout."""
+        if self._timeout is not None:
+            self.io_loop.remove_timeout(self._timeout)
+        self._timeout = self.io_loop.add_timeout(
+            self.io_loop.time() + msecs / 1000.0, self._handle_timeout)
+
+    def _handle_events(self, fd, events):
+        """Called by IOLoop when there is activity on one of our
+        file descriptors.
+        """
+        action = 0
+        if events & ioloop.IOLoop.READ:
+            action |= pycurl.CSELECT_IN
+        if events & ioloop.IOLoop.WRITE:
+            action |= pycurl.CSELECT_OUT
+        while True:
+            try:
+                ret, num_handles = self._multi.socket_action(fd, action)
+            except pycurl.error as e:
+                ret = e.args[0]
+            if ret != pycurl.E_CALL_MULTI_PERFORM:
+                break
+        self._finish_pending_requests()
+
+    def _handle_timeout(self):
+        """Called by IOLoop when the requested timeout has passed."""
+        with stack_context.NullContext():
+            self._timeout = None
+            while True:
+                try:
+                    ret, num_handles = self._multi.socket_action(
+                        pycurl.SOCKET_TIMEOUT, 0)
+                except pycurl.error as e:
+                    ret = e.args[0]
+                if ret != pycurl.E_CALL_MULTI_PERFORM:
+                    break
+            self._finish_pending_requests()
+
+        # In theory, we shouldn't have to do this because curl will
+        # call _set_timeout whenever the timeout changes. However,
+        # sometimes after _handle_timeout we will need to reschedule
+        # immediately even though nothing has changed from curl's
+        # perspective. This is because when socket_action is
+        # called with SOCKET_TIMEOUT, libcurl decides internally which
+        # timeouts need to be processed by using a monotonic clock
+        # (where available) while tornado uses python's time.time()
+        # to decide when timeouts have occurred.
When those clocks + # disagree on elapsed time (as they will whenever there is an + # NTP adjustment), tornado might call _handle_timeout before + # libcurl is ready. After each timeout, resync the scheduled + # timeout with libcurl's current state. + new_timeout = self._multi.timeout() + if new_timeout >= 0: + self._set_timeout(new_timeout) + + def _handle_force_timeout(self): + """Called by IOLoop periodically to ask libcurl to process any + events it may have forgotten about. + """ + with stack_context.NullContext(): + while True: + try: + ret, num_handles = self._multi.socket_all() + except pycurl.error as e: + ret = e.args[0] + if ret != pycurl.E_CALL_MULTI_PERFORM: + break + self._finish_pending_requests() + + def _finish_pending_requests(self): + """Process any requests that were completed by the last + call to multi.socket_action. + """ + while True: + num_q, ok_list, err_list = self._multi.info_read() + for curl in ok_list: + self._finish(curl) + for curl, errnum, errmsg in err_list: + self._finish(curl, errnum, errmsg) + if num_q == 0: + break + self._process_queue() + + def _process_queue(self): + with stack_context.NullContext(): + while True: + started = 0 + while self._free_list and self._requests: + started += 1 + curl = self._free_list.pop() + (request, callback, queue_start_time) = self._requests.popleft() + curl.info = { + "headers": httputil.HTTPHeaders(), + "buffer": BytesIO(), + "request": request, + "callback": callback, + "queue_start_time": queue_start_time, + "curl_start_time": time.time(), + "curl_start_ioloop_time": self.io_loop.current().time(), + } + try: + self._curl_setup_request( + curl, request, curl.info["buffer"], + curl.info["headers"]) + except Exception as e: + # If there was an error in setup, pass it on + # to the callback. Note that allowing the + # error to escape here will appear to work + # most of the time since we are still in the + # caller's original stack frame, but when + # _process_queue() is called from + # _finish_pending_requests the exceptions have + # nowhere to go. 
+ self._free_list.append(curl) + callback(HTTPResponse( + request=request, + code=599, + error=e)) + else: + self._multi.add_handle(curl) + + if not started: + break + + def _finish(self, curl, curl_error=None, curl_message=None): + info = curl.info + curl.info = None + self._multi.remove_handle(curl) + self._free_list.append(curl) + buffer = info["buffer"] + if curl_error: + error = CurlError(curl_error, curl_message) + code = error.code + effective_url = None + buffer.close() + buffer = None + else: + error = None + code = curl.getinfo(pycurl.HTTP_CODE) + effective_url = curl.getinfo(pycurl.EFFECTIVE_URL) + buffer.seek(0) + # the various curl timings are documented at + # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html + time_info = dict( + queue=info["curl_start_ioloop_time"] - info["queue_start_time"], + namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME), + connect=curl.getinfo(pycurl.CONNECT_TIME), + appconnect=curl.getinfo(pycurl.APPCONNECT_TIME), + pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME), + starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME), + total=curl.getinfo(pycurl.TOTAL_TIME), + redirect=curl.getinfo(pycurl.REDIRECT_TIME), + ) + try: + info["callback"](HTTPResponse( + request=info["request"], code=code, headers=info["headers"], + buffer=buffer, effective_url=effective_url, error=error, + reason=info['headers'].get("X-Http-Reason", None), + request_time=self.io_loop.time() - info["curl_start_ioloop_time"], + start_time=info["curl_start_time"], + time_info=time_info)) + except Exception: + self.handle_callback_exception(info["callback"]) + + def handle_callback_exception(self, callback): + self.io_loop.handle_callback_exception(callback) + + def _curl_create(self): + curl = pycurl.Curl() + if curl_log.isEnabledFor(logging.DEBUG): + curl.setopt(pycurl.VERBOSE, 1) + curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug) + if hasattr(pycurl, 'PROTOCOLS'): # PROTOCOLS first appeared in pycurl 7.19.5 (2014-07-12) + curl.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS) + curl.setopt(pycurl.REDIR_PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS) + return curl + + def _curl_setup_request(self, curl, request, buffer, headers): + curl.setopt(pycurl.URL, native_str(request.url)) + + # libcurl's magic "Expect: 100-continue" behavior causes delays + # with servers that don't support it (which include, among others, + # Google's OpenID endpoint). Additionally, this behavior has + # a bug in conjunction with the curl_multi_socket_action API + # (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976), + # which increases the delays. 
It's more trouble than it's worth, + # so just turn off the feature (yes, setting Expect: to an empty + # value is the official way to disable this) + if "Expect" not in request.headers: + request.headers["Expect"] = "" + + # libcurl adds Pragma: no-cache by default; disable that too + if "Pragma" not in request.headers: + request.headers["Pragma"] = "" + + curl.setopt(pycurl.HTTPHEADER, + ["%s: %s" % (native_str(k), native_str(v)) + for k, v in request.headers.get_all()]) + + curl.setopt(pycurl.HEADERFUNCTION, + functools.partial(self._curl_header_callback, + headers, request.header_callback)) + if request.streaming_callback: + def write_function(chunk): + self.io_loop.add_callback(request.streaming_callback, chunk) + else: + write_function = buffer.write + curl.setopt(pycurl.WRITEFUNCTION, write_function) + curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects) + curl.setopt(pycurl.MAXREDIRS, request.max_redirects) + curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout)) + curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout)) + if request.user_agent: + curl.setopt(pycurl.USERAGENT, native_str(request.user_agent)) + else: + curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)") + if request.network_interface: + curl.setopt(pycurl.INTERFACE, request.network_interface) + if request.decompress_response: + curl.setopt(pycurl.ENCODING, "gzip,deflate") + else: + curl.setopt(pycurl.ENCODING, "none") + if request.proxy_host and request.proxy_port: + curl.setopt(pycurl.PROXY, request.proxy_host) + curl.setopt(pycurl.PROXYPORT, request.proxy_port) + if request.proxy_username: + credentials = httputil.encode_username_password(request.proxy_username, + request.proxy_password) + curl.setopt(pycurl.PROXYUSERPWD, credentials) + + if (request.proxy_auth_mode is None or + request.proxy_auth_mode == "basic"): + curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_BASIC) + elif request.proxy_auth_mode == "digest": + curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_DIGEST) + else: + raise ValueError( + "Unsupported proxy_auth_mode %s" % request.proxy_auth_mode) + else: + curl.setopt(pycurl.PROXY, '') + curl.unsetopt(pycurl.PROXYUSERPWD) + if request.validate_cert: + curl.setopt(pycurl.SSL_VERIFYPEER, 1) + curl.setopt(pycurl.SSL_VERIFYHOST, 2) + else: + curl.setopt(pycurl.SSL_VERIFYPEER, 0) + curl.setopt(pycurl.SSL_VERIFYHOST, 0) + if request.ca_certs is not None: + curl.setopt(pycurl.CAINFO, request.ca_certs) + else: + # There is no way to restore pycurl.CAINFO to its default value + # (Using unsetopt makes it reject all certificates). + # I don't see any way to read the default value from python so it + # can be restored later. We'll have to just leave CAINFO untouched + # if no ca_certs file was specified, and require that if any + # request uses a custom ca_certs file, they all must. + pass + + if request.allow_ipv6 is False: + # Curl behaves reasonably when DNS resolution gives an ipv6 address + # that we can't reach, so allow ipv6 unless the user asks to disable. 
+ curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4) + else: + curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER) + + # Set the request method through curl's irritating interface which makes + # up names for almost every single method + curl_options = { + "GET": pycurl.HTTPGET, + "POST": pycurl.POST, + "PUT": pycurl.UPLOAD, + "HEAD": pycurl.NOBODY, + } + custom_methods = set(["DELETE", "OPTIONS", "PATCH"]) + for o in curl_options.values(): + curl.setopt(o, False) + if request.method in curl_options: + curl.unsetopt(pycurl.CUSTOMREQUEST) + curl.setopt(curl_options[request.method], True) + elif request.allow_nonstandard_methods or request.method in custom_methods: + curl.setopt(pycurl.CUSTOMREQUEST, request.method) + else: + raise KeyError('unknown method ' + request.method) + + body_expected = request.method in ("POST", "PATCH", "PUT") + body_present = request.body is not None + if not request.allow_nonstandard_methods: + # Some HTTP methods nearly always have bodies while others + # almost never do. Fail in this case unless the user has + # opted out of sanity checks with allow_nonstandard_methods. + if ((body_expected and not body_present) or + (body_present and not body_expected)): + raise ValueError( + 'Body must %sbe None for method %s (unless ' + 'allow_nonstandard_methods is true)' % + ('not ' if body_expected else '', request.method)) + + if body_expected or body_present: + if request.method == "GET": + # Even with `allow_nonstandard_methods` we disallow + # GET with a body (because libcurl doesn't allow it + # unless we use CUSTOMREQUEST). While the spec doesn't + # forbid clients from sending a body, it arguably + # disallows the server from doing anything with them. + raise ValueError('Body must be None for GET request') + request_buffer = BytesIO(utf8(request.body or '')) + + def ioctl(cmd): + if cmd == curl.IOCMD_RESTARTREAD: + request_buffer.seek(0) + curl.setopt(pycurl.READFUNCTION, request_buffer.read) + curl.setopt(pycurl.IOCTLFUNCTION, ioctl) + if request.method == "POST": + curl.setopt(pycurl.POSTFIELDSIZE, len(request.body or '')) + else: + curl.setopt(pycurl.UPLOAD, True) + curl.setopt(pycurl.INFILESIZE, len(request.body or '')) + + if request.auth_username is not None: + if request.auth_mode is None or request.auth_mode == "basic": + curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC) + elif request.auth_mode == "digest": + curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST) + else: + raise ValueError("Unsupported auth_mode %s" % request.auth_mode) + + userpwd = httputil.encode_username_password(request.auth_username, + request.auth_password) + curl.setopt(pycurl.USERPWD, userpwd) + curl_log.debug("%s %s (username: %r)", request.method, request.url, + request.auth_username) + else: + curl.unsetopt(pycurl.USERPWD) + curl_log.debug("%s %s", request.method, request.url) + + if request.client_cert is not None: + curl.setopt(pycurl.SSLCERT, request.client_cert) + + if request.client_key is not None: + curl.setopt(pycurl.SSLKEY, request.client_key) + + if request.ssl_options is not None: + raise ValueError("ssl_options not supported in curl_httpclient") + + if threading.activeCount() > 1: + # libcurl/pycurl is not thread-safe by default. When multiple threads + # are used, signals should be disabled. This has the side effect + # of disabling DNS timeouts in some environments (when libcurl is + # not linked against ares), so we don't do it when there is only one + # thread. 
+            # Applications that use many short-lived threads may need
+            # to set NOSIGNAL manually in a prepare_curl_callback since
+            # there may not be any other threads running at the time we call
+            # threading.activeCount.
+            curl.setopt(pycurl.NOSIGNAL, 1)
+        if request.prepare_curl_callback is not None:
+            request.prepare_curl_callback(curl)
+
+    def _curl_header_callback(self, headers, header_callback, header_line):
+        header_line = native_str(header_line.decode('latin1'))
+        if header_callback is not None:
+            self.io_loop.add_callback(header_callback, header_line)
+        # header_line as returned by curl includes the end-of-line characters.
+        # whitespace at the start should be preserved to allow multi-line headers
+        header_line = header_line.rstrip()
+        if header_line.startswith("HTTP/"):
+            headers.clear()
+            try:
+                (__, __, reason) = httputil.parse_response_start_line(header_line)
+                header_line = "X-Http-Reason: %s" % reason
+            except httputil.HTTPInputError:
+                return
+        if not header_line:
+            return
+        headers.parse_line(header_line)
+
+    def _curl_debug(self, debug_type, debug_msg):
+        debug_types = ('I', '<', '>', '<', '>')
+        if debug_type == 0:
+            debug_msg = native_str(debug_msg)
+            curl_log.debug('%s', debug_msg.strip())
+        elif debug_type in (1, 2):
+            debug_msg = native_str(debug_msg)
+            for line in debug_msg.splitlines():
+                curl_log.debug('%s %s', debug_types[debug_type], line)
+        elif debug_type == 4:
+            curl_log.debug('%s %r', debug_types[debug_type], debug_msg)
+
+
+class CurlError(HTTPError):
+    def __init__(self, errno, message):
+        HTTPError.__init__(self, 599, message)
+        self.errno = errno
+
+
+if __name__ == "__main__":
+    AsyncHTTPClient.configure(CurlAsyncHTTPClient)
+    main()
diff --git a/server/www/packages/packages-windows/x86/tornado/escape.py b/server/www/packages/packages-windows/x86/tornado/escape.py
new file mode 100644
index 0000000..a79ece6
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/tornado/escape.py
@@ -0,0 +1,399 @@
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
+
+Also includes a few other miscellaneous string manipulation functions that
+have crept in over time.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import json
+import re
+
+from tornado.util import PY3, unicode_type, basestring_type
+
+if PY3:
+    from urllib.parse import parse_qs as _parse_qs
+    import html.entities as htmlentitydefs
+    import urllib.parse as urllib_parse
+    unichr = chr
+else:
+    from urlparse import parse_qs as _parse_qs
+    import htmlentitydefs
+    import urllib as urllib_parse
+
+try:
+    import typing  # noqa
+except ImportError:
+    pass
+
+
+_XHTML_ESCAPE_RE = re.compile('[&<>"\']')
+_XHTML_ESCAPE_DICT = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;',
+                      '\'': '&#39;'}
+
+
+def xhtml_escape(value):
+    """Escapes a string so it is valid within HTML or XML.
+
+    Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
+    When used in attribute values the escaped strings must be enclosed
+    in quotes.
+
+    .. versionchanged:: 3.2
+
+       Added the single quote to the list of escaped characters.
+    """
+    return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],
+                                to_basestring(value))
+
+
+def xhtml_unescape(value):
+    """Un-escapes an XML-escaped string."""
+    return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
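A quick round-trip through the two helpers above behaves as follows; this is an illustrative sketch, not part of the vendored file::

    from tornado.escape import xhtml_escape, xhtml_unescape

    s = '<b>"O\'Reilly & Partners"</b>'
    escaped = xhtml_escape(s)
    # -> '&lt;b&gt;&quot;O&#39;Reilly &amp; Partners&quot;&lt;/b&gt;'
    assert xhtml_unescape(escaped) == s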
+
+
+# The fact that json_encode wraps json.dumps is an implementation detail.
+# Please see https://github.com/tornadoweb/tornado/pull/706
+# before sending a pull request that adds **kwargs to this function.
+def json_encode(value):
+    """JSON-encodes the given Python object."""
+    # JSON permits but does not require forward slashes to be escaped.
+    # This is useful when json data is emitted in a <script> tag
+    # in HTML, as it prevents </script> tags from prematurely terminating
+    # the javascript.  Some json libraries do this escaping by default,
+    # although python's standard library does not, so we do it here.
+    # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
+    return json.dumps(value).replace("</", "<\\/")
+
+
+def json_decode(value):
+    """Returns Python objects for the given JSON string."""
+    return json.loads(to_basestring(value))
+
+
+def squeeze(value):
+    """Replace all sequences of whitespace chars with a single space."""
+    return re.sub(r"[\x00-\x20]+", " ", value).strip()
+
+
+def url_escape(value, plus=True):
+    """Returns a URL-encoded version of the given value.
+
+    If ``plus`` is true (the default), spaces will be represented
+    as "+" instead of "%20".  This is appropriate for query strings
+    but not for the path component of a URL.  Note that this default
+    is the reverse of Python's urllib module.
+
+    .. versionadded:: 3.1
+        The ``plus`` argument
+    """
+    quote = urllib_parse.quote_plus if plus else urllib_parse.quote
+    return quote(utf8(value))
+
+
+_UTF8_TYPES = (bytes, type(None))
+
+
+def utf8(value):
+    # type: (typing.Union[str, bytes, None]) -> typing.Union[bytes, None]
+    """Converts a string argument to a byte string.
+
+    If the argument is already a byte string or None, it is returned unchanged.
+    Otherwise it must be a unicode string and is encoded as utf8.
+    """
+    if isinstance(value, _UTF8_TYPES):
+        return value
+    if not isinstance(value, unicode_type):
+        raise TypeError(
+            "Expected bytes, unicode, or None; got %r" % type(value)
+        )
+    return value.encode("utf-8")
+
+
+_TO_UNICODE_TYPES = (unicode_type, type(None))
+
+
+def to_unicode(value):
+    """Converts a string argument to a unicode string.
+
+    If the argument is already a unicode string or None, it is returned
+    unchanged.  Otherwise it must be a byte string and is decoded as utf8.
+    """
+    if isinstance(value, _TO_UNICODE_TYPES):
+        return value
+    if not isinstance(value, bytes):
+        raise TypeError(
+            "Expected bytes, unicode, or None; got %r" % type(value)
+        )
+    return value.decode("utf-8")
+
+
+# to_unicode was previously named _unicode not because it was private,
+# but to avoid conflicts with the built-in unicode() function/type
+_unicode = to_unicode
+
+# When dealing with the standard library across python 2 and 3 it is
+# sometimes useful to have a direct conversion to the native string type
+if str is unicode_type:
+    native_str = to_unicode
+else:
+    native_str = utf8
+
+_BASESTRING_TYPES = (basestring_type, type(None))
+
+
+def to_basestring(value):
+    """Converts a string argument to a subclass of basestring.
+
+    In python2, byte and unicode strings are mostly interchangeable,
+    so functions that deal with a user-supplied argument in combination
+    with ascii string constants can use either and should return the type
+    the user supplied.  In python3, the two types are not interchangeable,
+    so this method is needed to convert byte strings to unicode.
+    """
+    if isinstance(value, _BASESTRING_TYPES):
+        return value
+    if not isinstance(value, bytes):
+        raise TypeError(
+            "Expected bytes, unicode, or None; got %r" % type(value)
+        )
+    return value.decode("utf-8")
+
+
+def recursive_unicode(obj):
+    """Walks a simple data structure, converting byte strings to unicode.
+
+    Supports lists, tuples, and dictionaries.
+    """
+    if isinstance(obj, dict):
+        return dict((recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items())
+    elif isinstance(obj, list):
+        return list(recursive_unicode(i) for i in obj)
+    elif isinstance(obj, tuple):
+        return tuple(recursive_unicode(i) for i in obj)
+    elif isinstance(obj, bytes):
+        return to_unicode(obj)
+    else:
+        return obj
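The conversion helpers above compose as in this short sketch (illustrative, with made-up sample data)::

    from tornado.escape import recursive_unicode, to_unicode, utf8

    assert utf8(u"caf\u00e9") == b"caf\xc3\xa9"
    assert to_unicode(b"caf\xc3\xa9") == u"caf\u00e9"
    # recursive_unicode walks containers:
    assert recursive_unicode({b"k": [b"a", u"b"]}) == {u"k": [u"a", u"b"]}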
+ """ + if isinstance(obj, dict): + return dict((recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items()) + elif isinstance(obj, list): + return list(recursive_unicode(i) for i in obj) + elif isinstance(obj, tuple): + return tuple(recursive_unicode(i) for i in obj) + elif isinstance(obj, bytes): + return to_unicode(obj) + else: + return obj + + +# I originally used the regex from +# http://daringfireball.net/2010/07/improved_regex_for_matching_urls +# but it gets all exponential on certain patterns (such as too many trailing +# dots), causing the regex matcher to never return. +# This regex should avoid those problems. +# Use to_unicode instead of tornado.util.u - we don't want backslashes getting +# processed as escapes. +_URL_RE = re.compile(to_unicode( + r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&|")*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&|")*\)))+)""" # noqa: E501 +)) + + +def linkify(text, shorten=False, extra_params="", + require_protocol=False, permitted_protocols=["http", "https"]): + """Converts plain text into HTML with links. + + For example: ``linkify("Hello http://tornadoweb.org!")`` would return + ``Hello http://tornadoweb.org!`` + + Parameters: + + * ``shorten``: Long urls will be shortened for display. + + * ``extra_params``: Extra text to include in the link tag, or a callable + taking the link as an argument and returning the extra text + e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``, + or:: + + def extra_params_cb(url): + if url.startswith("http://example.com"): + return 'class="internal"' + else: + return 'class="external" rel="nofollow"' + linkify(text, extra_params=extra_params_cb) + + * ``require_protocol``: Only linkify urls which include a protocol. If + this is False, urls such as www.facebook.com will also be linkified. + + * ``permitted_protocols``: List (or set) of protocols which should be + linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp", + "mailto"])``. It is very unsafe to include protocols such as + ``javascript``. + """ + if extra_params and not callable(extra_params): + extra_params = " " + extra_params.strip() + + def make_link(m): + url = m.group(1) + proto = m.group(2) + if require_protocol and not proto: + return url # not protocol, no linkify + + if proto and proto not in permitted_protocols: + return url # bad protocol, no linkify + + href = m.group(1) + if not proto: + href = "http://" + href # no proto specified, use http + + if callable(extra_params): + params = " " + extra_params(href).strip() + else: + params = extra_params + + # clip long urls. max_len is just an approximation + max_len = 30 + if shorten and len(url) > max_len: + before_clip = url + if proto: + proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for : + else: + proto_len = 0 + + parts = url[proto_len:].split("/") + if len(parts) > 1: + # Grab the whole host part plus the first bit of the path + # The path is usually not that interesting once shortened + # (no more slug, etc), so it really just provides a little + # extra indication of shortening. + url = url[:proto_len] + parts[0] + "/" + \ + parts[1][:8].split('?')[0].split('.')[0] + + if len(url) > max_len * 1.5: # still too long + url = url[:max_len] + + if url != before_clip: + amp = url.rfind('&') + # avoid splitting html char entities + if amp > max_len - 5: + url = url[:amp] + url += "..." 
+
+
+def _convert_entity(m):
+    if m.group(1) == "#":
+        try:
+            if m.group(2)[:1].lower() == 'x':
+                return unichr(int(m.group(2)[1:], 16))
+            else:
+                return unichr(int(m.group(2)))
+        except ValueError:
+            return "&#%s;" % m.group(2)
+    try:
+        return _HTML_UNICODE_MAP[m.group(2)]
+    except KeyError:
+        return "&%s;" % m.group(2)
+
+
+def _build_unicode_map():
+    unicode_map = {}
+    for name, value in htmlentitydefs.name2codepoint.items():
+        unicode_map[name] = unichr(value)
+    return unicode_map
+
+
+_HTML_UNICODE_MAP = _build_unicode_map()
diff --git a/server/www/packages/packages-windows/x86/tornado/gen.py b/server/www/packages/packages-windows/x86/tornado/gen.py
new file mode 100644
index 0000000..3556374
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/tornado/gen.py
@@ -0,0 +1,1367 @@
+"""``tornado.gen`` implements generator-based coroutines.
+
+.. note::
+
+   The "decorator and generator" approach in this module is a
+   precursor to native coroutines (using ``async def`` and ``await``)
+   which were introduced in Python 3.5. Applications that do not
+   require compatibility with older versions of Python should use
+   native coroutines instead. Some parts of this module are still
+   useful with native coroutines, notably `multi`, `sleep`,
+   `WaitIterator`, and `with_timeout`. Some of these functions have
+   counterparts in the `asyncio` module which may be used as well,
+   although the two may not necessarily be 100% compatible.
+
+Coroutines provide an easier way to work in an asynchronous
+environment than chaining callbacks. Code using coroutines is
+technically asynchronous, but it is written as a single generator
+instead of a collection of separate functions.
+
+For example, the following callback-based asynchronous handler:
+
+.. testcode::
+
+    class AsyncHandler(RequestHandler):
+        @asynchronous
+        def get(self):
+            http_client = AsyncHTTPClient()
+            http_client.fetch("http://example.com",
+                              callback=self.on_fetch)
+
+        def on_fetch(self, response):
+            do_something_with_response(response)
+            self.render("template.html")
+
+.. testoutput::
+   :hide:
+
+could be written with ``gen`` as:
+
+.. testcode::
+
+    class GenAsyncHandler(RequestHandler):
+        @gen.coroutine
+        def get(self):
+            http_client = AsyncHTTPClient()
+            response = yield http_client.fetch("http://example.com")
+            do_something_with_response(response)
+            self.render("template.html")
+
+.. testoutput::
+   :hide:
+
+Most asynchronous functions in Tornado return a `.Future`;
+yielding this object returns its ``Future.result``.
+
+You can also yield a list or dict of ``Futures``, which will be
+started at the same time and run in parallel; a list or dict of results will
+be returned when they are all finished:
+
+.. testcode::
+
+    @gen.coroutine
+    def get(self):
+        http_client = AsyncHTTPClient()
+        response1, response2 = yield [http_client.fetch(url1),
+                                      http_client.fetch(url2)]
+        response_dict = yield dict(response3=http_client.fetch(url3),
+                                   response4=http_client.fetch(url4))
+        response3 = response_dict['response3']
+        response4 = response_dict['response4']
+
+..
testoutput:: + :hide: + +If the `~functools.singledispatch` library is available (standard in +Python 3.4, available via the `singledispatch +`_ package on older +versions), additional types of objects may be yielded. Tornado includes +support for ``asyncio.Future`` and Twisted's ``Deferred`` class when +``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported. +See the `convert_yielded` function to extend this mechanism. + +.. versionchanged:: 3.2 + Dict support added. + +.. versionchanged:: 4.1 + Support added for yielding ``asyncio`` Futures and Twisted Deferreds + via ``singledispatch``. + +""" +from __future__ import absolute_import, division, print_function + +import collections +import functools +import itertools +import os +import sys +import types +import warnings + +from tornado.concurrent import (Future, is_future, chain_future, future_set_exc_info, + future_add_done_callback, future_set_result_unless_cancelled) +from tornado.ioloop import IOLoop +from tornado.log import app_log +from tornado import stack_context +from tornado.util import PY3, raise_exc_info, TimeoutError + +try: + try: + # py34+ + from functools import singledispatch # type: ignore + except ImportError: + from singledispatch import singledispatch # backport +except ImportError: + # In most cases, singledispatch is required (to avoid + # difficult-to-diagnose problems in which the functionality + # available differs depending on which invisble packages are + # installed). However, in Google App Engine third-party + # dependencies are more trouble so we allow this module to be + # imported without it. + if 'APPENGINE_RUNTIME' not in os.environ: + raise + singledispatch = None + +try: + try: + # py35+ + from collections.abc import Generator as GeneratorType # type: ignore + except ImportError: + from backports_abc import Generator as GeneratorType # type: ignore + + try: + # py35+ + from inspect import isawaitable # type: ignore + except ImportError: + from backports_abc import isawaitable +except ImportError: + if 'APPENGINE_RUNTIME' not in os.environ: + raise + from types import GeneratorType + + def isawaitable(x): # type: ignore + return False + +if PY3: + import builtins +else: + import __builtin__ as builtins + + +class KeyReuseError(Exception): + pass + + +class UnknownKeyError(Exception): + pass + + +class LeakedCallbackError(Exception): + pass + + +class BadYieldError(Exception): + pass + + +class ReturnValueIgnoredError(Exception): + pass + + +def _value_from_stopiteration(e): + try: + # StopIteration has a value attribute beginning in py33. + # So does our Return class. + return e.value + except AttributeError: + pass + try: + # Cython backports coroutine functionality by putting the value in + # e.args[0]. + return e.args[0] + except (AttributeError, IndexError): + return None + + +def _create_future(): + future = Future() + # Fixup asyncio debug info by removing extraneous stack entries + source_traceback = getattr(future, "_source_traceback", ()) + while source_traceback: + # Each traceback entry is equivalent to a + # (filename, self.lineno, self.name, self.line) tuple + filename = source_traceback[-1][0] + if filename == __file__: + del source_traceback[-1] + else: + break + return future + + +def engine(func): + """Callback-oriented decorator for asynchronous generators. + + This is an older interface; for new code that does not need to be + compatible with versions of Tornado older than 3.0 the + `coroutine` decorator is recommended instead. 
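A minimal, self-contained example of the recommended ``coroutine`` style (names here are illustrative)::

    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def add_async(a, b):
        yield gen.sleep(0.01)      # stand-in for real asynchronous work
        raise gen.Return(a + b)    # or plain ``return a + b`` on Python 3.3+

    print(IOLoop.current().run_sync(lambda: add_async(1, 2)))  # 3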
+ + This decorator is similar to `coroutine`, except it does not + return a `.Future` and the ``callback`` argument is not treated + specially. + + In most cases, functions decorated with `engine` should take + a ``callback`` argument and invoke it with their result when + they are finished. One notable exception is the + `~tornado.web.RequestHandler` :ref:`HTTP verb methods `, + which use ``self.finish()`` in place of a callback argument. + + .. deprecated:: 5.1 + + This decorator will be removed in 6.0. Use `coroutine` or + ``async def`` instead. + """ + warnings.warn("gen.engine is deprecated, use gen.coroutine or async def instead", + DeprecationWarning) + func = _make_coroutine_wrapper(func, replace_callback=False) + + @functools.wraps(func) + def wrapper(*args, **kwargs): + future = func(*args, **kwargs) + + def final_callback(future): + if future.result() is not None: + raise ReturnValueIgnoredError( + "@gen.engine functions cannot return values: %r" % + (future.result(),)) + # The engine interface doesn't give us any way to return + # errors but to raise them into the stack context. + # Save the stack context here to use when the Future has resolved. + future_add_done_callback(future, stack_context.wrap(final_callback)) + return wrapper + + +def coroutine(func): + """Decorator for asynchronous generators. + + Any generator that yields objects from this module must be wrapped + in either this decorator or `engine`. + + Coroutines may "return" by raising the special exception + `Return(value) `. In Python 3.3+, it is also possible for + the function to simply use the ``return value`` statement (prior to + Python 3.3 generators were not allowed to also return values). + In all versions of Python a coroutine that simply wishes to exit + early may use the ``return`` statement without a value. + + Functions with this decorator return a `.Future`. Additionally, + they may be called with a ``callback`` keyword argument, which + will be invoked with the future's result when it resolves. If the + coroutine fails, the callback will not be run and an exception + will be raised into the surrounding `.StackContext`. The + ``callback`` argument is not visible inside the decorated + function; it is handled by the decorator itself. + + .. warning:: + + When exceptions occur inside a coroutine, the exception + information will be stored in the `.Future` object. You must + examine the result of the `.Future` object, or the exception + may go unnoticed by your code. This means yielding the function + if called from another coroutine, using something like + `.IOLoop.run_sync` for top-level calls, or passing the `.Future` + to `.IOLoop.add_future`. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. + """ + return _make_coroutine_wrapper(func, replace_callback=True) + + +def _make_coroutine_wrapper(func, replace_callback): + """The inner workings of ``@gen.coroutine`` and ``@gen.engine``. + + The two decorators differ in their treatment of the ``callback`` + argument, so we cannot simply implement ``@engine`` in terms of + ``@coroutine``. + """ + # On Python 3.5, set the coroutine flag on our generator, to allow it + # to be used with 'await'. 
+ wrapped = func + if hasattr(types, 'coroutine'): + func = types.coroutine(func) + + @functools.wraps(wrapped) + def wrapper(*args, **kwargs): + future = _create_future() + + if replace_callback and 'callback' in kwargs: + warnings.warn("callback arguments are deprecated, use the returned Future instead", + DeprecationWarning, stacklevel=2) + callback = kwargs.pop('callback') + IOLoop.current().add_future( + future, lambda future: callback(future.result())) + + try: + result = func(*args, **kwargs) + except (Return, StopIteration) as e: + result = _value_from_stopiteration(e) + except Exception: + future_set_exc_info(future, sys.exc_info()) + try: + return future + finally: + # Avoid circular references + future = None + else: + if isinstance(result, GeneratorType): + # Inline the first iteration of Runner.run. This lets us + # avoid the cost of creating a Runner when the coroutine + # never actually yields, which in turn allows us to + # use "optional" coroutines in critical path code without + # performance penalty for the synchronous case. + try: + orig_stack_contexts = stack_context._state.contexts + yielded = next(result) + if stack_context._state.contexts is not orig_stack_contexts: + yielded = _create_future() + yielded.set_exception( + stack_context.StackContextInconsistentError( + 'stack_context inconsistency (probably caused ' + 'by yield within a "with StackContext" block)')) + except (StopIteration, Return) as e: + future_set_result_unless_cancelled(future, _value_from_stopiteration(e)) + except Exception: + future_set_exc_info(future, sys.exc_info()) + else: + # Provide strong references to Runner objects as long + # as their result future objects also have strong + # references (typically from the parent coroutine's + # Runner). This keeps the coroutine's Runner alive. + # We do this by exploiting the public API + # add_done_callback() instead of putting a private + # attribute on the Future. + # (Github issues #1769, #2229). + runner = Runner(result, future, yielded) + future.add_done_callback(lambda _: runner) + yielded = None + try: + return future + finally: + # Subtle memory optimization: if next() raised an exception, + # the future's exc_info contains a traceback which + # includes this stack frame. This creates a cycle, + # which will be collected at the next full GC but has + # been shown to greatly increase memory usage of + # benchmarks (relative to the refcount-based scheme + # used in the absence of cycles). We can avoid the + # cycle by clearing the local variable after we return it. + future = None + future_set_result_unless_cancelled(future, result) + return future + + wrapper.__wrapped__ = wrapped + wrapper.__tornado_coroutine__ = True + return wrapper + + +def is_coroutine_function(func): + """Return whether *func* is a coroutine function, i.e. a function + wrapped with `~.gen.coroutine`. + + .. versionadded:: 4.5 + """ + return getattr(func, '__tornado_coroutine__', False) + + +class Return(Exception): + """Special exception to return a value from a `coroutine`. + + If this exception is raised, its value argument is used as the + result of the coroutine:: + + @gen.coroutine + def fetch_json(url): + response = yield AsyncHTTPClient().fetch(url) + raise gen.Return(json_decode(response.body)) + + In Python 3.3, this exception is no longer necessary: the ``return`` + statement can be used directly to return a value (previously + ``yield`` and ``return`` with a value could not be combined in the + same function). 
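Concretely, the two definitions below are interchangeable on Python 3.3 and later (an illustrative sketch)::

    from tornado import gen

    @gen.coroutine
    def old_style():
        yield gen.moment
        raise gen.Return(42)   # works on Python 2 and 3

    @gen.coroutine
    def new_style():
        yield gen.moment
        return 42              # generators may return values on 3.3+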
+ + By analogy with the return statement, the value argument is optional, + but it is never necessary to ``raise gen.Return()``. The ``return`` + statement can be used with no arguments instead. + """ + def __init__(self, value=None): + super(Return, self).__init__() + self.value = value + # Cython recognizes subclasses of StopIteration with a .args tuple. + self.args = (value,) + + +class WaitIterator(object): + """Provides an iterator to yield the results of futures as they finish. + + Yielding a set of futures like this: + + ``results = yield [future1, future2]`` + + pauses the coroutine until both ``future1`` and ``future2`` + return, and then restarts the coroutine with the results of both + futures. If either future is an exception, the expression will + raise that exception and all the results will be lost. + + If you need to get the result of each future as soon as possible, + or if you need the result of some futures even if others produce + errors, you can use ``WaitIterator``:: + + wait_iterator = gen.WaitIterator(future1, future2) + while not wait_iterator.done(): + try: + result = yield wait_iterator.next() + except Exception as e: + print("Error {} from {}".format(e, wait_iterator.current_future)) + else: + print("Result {} received from {} at {}".format( + result, wait_iterator.current_future, + wait_iterator.current_index)) + + Because results are returned as soon as they are available the + output from the iterator *will not be in the same order as the + input arguments*. If you need to know which future produced the + current result, you can use the attributes + ``WaitIterator.current_future``, or ``WaitIterator.current_index`` + to get the index of the future from the input list. (if keyword + arguments were used in the construction of the `WaitIterator`, + ``current_index`` will use the corresponding keyword). + + On Python 3.5, `WaitIterator` implements the async iterator + protocol, so it can be used with the ``async for`` statement (note + that in this version the entire iteration is aborted if any value + raises an exception, while the previous example can continue past + individual errors):: + + async for result in gen.WaitIterator(future1, future2): + print("Result {} received from {} at {}".format( + result, wait_iterator.current_future, + wait_iterator.current_index)) + + .. versionadded:: 4.1 + + .. versionchanged:: 4.3 + Added ``async for`` support in Python 3.5. + + """ + def __init__(self, *args, **kwargs): + if args and kwargs: + raise ValueError( + "You must provide args or kwargs, not both") + + if kwargs: + self._unfinished = dict((f, k) for (k, f) in kwargs.items()) + futures = list(kwargs.values()) + else: + self._unfinished = dict((f, i) for (i, f) in enumerate(args)) + futures = args + + self._finished = collections.deque() + self.current_index = self.current_future = None + self._running_future = None + + for future in futures: + future_add_done_callback(future, self._done_callback) + + def done(self): + """Returns True if this iterator has no more results.""" + if self._finished or self._unfinished: + return False + # Clear the 'current' values when iteration is done. + self.current_index = self.current_future = None + return True + + def next(self): + """Returns a `.Future` that will yield the next available result. + + Note that this `.Future` will not be the same object as any of + the inputs. 
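A runnable sketch of the pattern (timings are illustrative)::

    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def main():
        wait = gen.WaitIterator(fast=gen.sleep(0.01), slow=gen.sleep(0.05))
        while not wait.done():
            yield wait.next()
            print(wait.current_index)  # prints "fast", then "slow"

    IOLoop.current().run_sync(main)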
+ """ + self._running_future = Future() + + if self._finished: + self._return_result(self._finished.popleft()) + + return self._running_future + + def _done_callback(self, done): + if self._running_future and not self._running_future.done(): + self._return_result(done) + else: + self._finished.append(done) + + def _return_result(self, done): + """Called set the returned future's state that of the future + we yielded, and set the current future for the iterator. + """ + chain_future(done, self._running_future) + + self.current_future = done + self.current_index = self._unfinished.pop(done) + + def __aiter__(self): + return self + + def __anext__(self): + if self.done(): + # Lookup by name to silence pyflakes on older versions. + raise getattr(builtins, 'StopAsyncIteration')() + return self.next() + + +class YieldPoint(object): + """Base class for objects that may be yielded from the generator. + + .. deprecated:: 4.0 + Use `Futures <.Future>` instead. This class and all its subclasses + will be removed in 6.0 + """ + def __init__(self): + warnings.warn("YieldPoint is deprecated, use Futures instead", + DeprecationWarning) + + def start(self, runner): + """Called by the runner after the generator has yielded. + + No other methods will be called on this object before ``start``. + """ + raise NotImplementedError() + + def is_ready(self): + """Called by the runner to determine whether to resume the generator. + + Returns a boolean; may be called more than once. + """ + raise NotImplementedError() + + def get_result(self): + """Returns the value to use as the result of the yield expression. + + This method will only be called once, and only after `is_ready` + has returned true. + """ + raise NotImplementedError() + + +class Callback(YieldPoint): + """Returns a callable object that will allow a matching `Wait` to proceed. + + The key may be any value suitable for use as a dictionary key, and is + used to match ``Callbacks`` to their corresponding ``Waits``. The key + must be unique among outstanding callbacks within a single run of the + generator function, but may be reused across different runs of the same + function (so constants generally work fine). + + The callback may be called with zero or one arguments; if an argument + is given it will be returned by `Wait`. + + .. deprecated:: 4.0 + Use `Futures <.Future>` instead. This class will be removed in 6.0. + """ + def __init__(self, key): + warnings.warn("gen.Callback is deprecated, use Futures instead", + DeprecationWarning) + self.key = key + + def start(self, runner): + self.runner = runner + runner.register_callback(self.key) + + def is_ready(self): + return True + + def get_result(self): + return self.runner.result_callback(self.key) + + +class Wait(YieldPoint): + """Returns the argument passed to the result of a previous `Callback`. + + .. deprecated:: 4.0 + Use `Futures <.Future>` instead. This class will be removed in 6.0. + """ + def __init__(self, key): + warnings.warn("gen.Wait is deprecated, use Futures instead", + DeprecationWarning) + self.key = key + + def start(self, runner): + self.runner = runner + + def is_ready(self): + return self.runner.is_ready(self.key) + + def get_result(self): + return self.runner.pop_result(self.key) + + +class WaitAll(YieldPoint): + """Returns the results of multiple previous `Callbacks `. + + The argument is a sequence of `Callback` keys, and the result is + a list of results in the same order. + + `WaitAll` is equivalent to yielding a list of `Wait` objects. + + .. 
deprecated:: 4.0 + Use `Futures <.Future>` instead. This class will be removed in 6.0. + """ + def __init__(self, keys): + warnings.warn("gen.WaitAll is deprecated, use gen.multi instead", + DeprecationWarning) + self.keys = keys + + def start(self, runner): + self.runner = runner + + def is_ready(self): + return all(self.runner.is_ready(key) for key in self.keys) + + def get_result(self): + return [self.runner.pop_result(key) for key in self.keys] + + +def Task(func, *args, **kwargs): + """Adapts a callback-based asynchronous function for use in coroutines. + + Takes a function (and optional additional arguments) and runs it with + those arguments plus a ``callback`` keyword argument. The argument passed + to the callback is returned as the result of the yield expression. + + .. versionchanged:: 4.0 + ``gen.Task`` is now a function that returns a `.Future`, instead of + a subclass of `YieldPoint`. It still behaves the same way when + yielded. + + .. deprecated:: 5.1 + This function is deprecated and will be removed in 6.0. + """ + warnings.warn("gen.Task is deprecated, use Futures instead", + DeprecationWarning) + future = _create_future() + + def handle_exception(typ, value, tb): + if future.done(): + return False + future_set_exc_info(future, (typ, value, tb)) + return True + + def set_result(result): + if future.done(): + return + future_set_result_unless_cancelled(future, result) + with stack_context.ExceptionStackContext(handle_exception): + func(*args, callback=_argument_adapter(set_result), **kwargs) + return future + + +class YieldFuture(YieldPoint): + def __init__(self, future): + """Adapts a `.Future` to the `YieldPoint` interface. + + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + + .. deprecated:: 5.1 + This class will be removed in 6.0. + """ + warnings.warn("YieldFuture is deprecated, use Futures instead", + DeprecationWarning) + self.future = future + self.io_loop = IOLoop.current() + + def start(self, runner): + if not self.future.done(): + self.runner = runner + self.key = object() + runner.register_callback(self.key) + self.io_loop.add_future(self.future, runner.result_callback(self.key)) + else: + self.runner = None + self.result_fn = self.future.result + + def is_ready(self): + if self.runner is not None: + return self.runner.is_ready(self.key) + else: + return True + + def get_result(self): + if self.runner is not None: + return self.runner.pop_result(self.key).result() + else: + return self.result_fn() + + +def _contains_yieldpoint(children): + """Returns True if ``children`` contains any YieldPoints. + + ``children`` may be a dict or a list, as used by `MultiYieldPoint` + and `multi_future`. + """ + if isinstance(children, dict): + return any(isinstance(i, YieldPoint) for i in children.values()) + if isinstance(children, list): + return any(isinstance(i, YieldPoint) for i in children) + return False + + +def multi(children, quiet_exceptions=()): + """Runs multiple asynchronous operations in parallel. + + ``children`` may either be a list or a dict whose values are + yieldable objects. ``multi()`` returns a new yieldable + object that resolves to a parallel structure containing their + results. If ``children`` is a list, the result is a list of + results in the same order; if it is a dict, the result is a dict + with the same keys. 
+ + That is, ``results = yield multi(list_of_futures)`` is equivalent + to:: + + results = [] + for future in list_of_futures: + results.append(yield future) + + If any children raise exceptions, ``multi()`` will raise the first + one. All others will be logged, unless they are of types + contained in the ``quiet_exceptions`` argument. + + If any of the inputs are `YieldPoints `, the returned + yieldable object is a `YieldPoint`. Otherwise, returns a `.Future`. + This means that the result of `multi` can be used in a native + coroutine if and only if all of its children can be. + + In a ``yield``-based coroutine, it is not normally necessary to + call this function directly, since the coroutine runner will + do it automatically when a list or dict is yielded. However, + it is necessary in ``await``-based coroutines, or to pass + the ``quiet_exceptions`` argument. + + This function is available under the names ``multi()`` and ``Multi()`` + for historical reasons. + + Cancelling a `.Future` returned by ``multi()`` does not cancel its + children. `asyncio.gather` is similar to ``multi()``, but it does + cancel its children. + + .. versionchanged:: 4.2 + If multiple yieldables fail, any exceptions after the first + (which is raised) will be logged. Added the ``quiet_exceptions`` + argument to suppress this logging for selected exception types. + + .. versionchanged:: 4.3 + Replaced the class ``Multi`` and the function ``multi_future`` + with a unified function ``multi``. Added support for yieldables + other than `YieldPoint` and `.Future`. + + """ + if _contains_yieldpoint(children): + return MultiYieldPoint(children, quiet_exceptions=quiet_exceptions) + else: + return multi_future(children, quiet_exceptions=quiet_exceptions) + + +Multi = multi + + +class MultiYieldPoint(YieldPoint): + """Runs multiple asynchronous operations in parallel. + + This class is similar to `multi`, but it always creates a stack + context even when no children require it. It is not compatible with + native coroutines. + + .. versionchanged:: 4.2 + If multiple ``YieldPoints`` fail, any exceptions after the first + (which is raised) will be logged. Added the ``quiet_exceptions`` + argument to suppress this logging for selected exception types. + + .. versionchanged:: 4.3 + Renamed from ``Multi`` to ``MultiYieldPoint``. The name ``Multi`` + remains as an alias for the equivalent `multi` function. + + .. deprecated:: 4.3 + Use `multi` instead. This class will be removed in 6.0. 
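For comparison, the non-deprecated `multi` covers the same use case; a short sketch::

    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def square(n):
        yield gen.sleep(0.01)
        raise gen.Return(n * n)

    @gen.coroutine
    def main():
        as_list = yield gen.multi([square(2), square(3)])
        as_dict = yield gen.multi({"a": square(4)})
        print(as_list, as_dict)  # [4, 9] {'a': 16}

    IOLoop.current().run_sync(main)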
+ """ + def __init__(self, children, quiet_exceptions=()): + warnings.warn("MultiYieldPoint is deprecated, use Futures instead", + DeprecationWarning) + self.keys = None + if isinstance(children, dict): + self.keys = list(children.keys()) + children = children.values() + self.children = [] + for i in children: + if not isinstance(i, YieldPoint): + i = convert_yielded(i) + if is_future(i): + i = YieldFuture(i) + self.children.append(i) + assert all(isinstance(i, YieldPoint) for i in self.children) + self.unfinished_children = set(self.children) + self.quiet_exceptions = quiet_exceptions + + def start(self, runner): + for i in self.children: + i.start(runner) + + def is_ready(self): + finished = list(itertools.takewhile( + lambda i: i.is_ready(), self.unfinished_children)) + self.unfinished_children.difference_update(finished) + return not self.unfinished_children + + def get_result(self): + result_list = [] + exc_info = None + for f in self.children: + try: + result_list.append(f.get_result()) + except Exception as e: + if exc_info is None: + exc_info = sys.exc_info() + else: + if not isinstance(e, self.quiet_exceptions): + app_log.error("Multiple exceptions in yield list", + exc_info=True) + if exc_info is not None: + raise_exc_info(exc_info) + if self.keys is not None: + return dict(zip(self.keys, result_list)) + else: + return list(result_list) + + +def multi_future(children, quiet_exceptions=()): + """Wait for multiple asynchronous futures in parallel. + + This function is similar to `multi`, but does not support + `YieldPoints `. + + .. versionadded:: 4.0 + + .. versionchanged:: 4.2 + If multiple ``Futures`` fail, any exceptions after the first (which is + raised) will be logged. Added the ``quiet_exceptions`` + argument to suppress this logging for selected exception types. + + .. deprecated:: 4.3 + Use `multi` instead. + """ + if isinstance(children, dict): + keys = list(children.keys()) + children = children.values() + else: + keys = None + children = list(map(convert_yielded, children)) + assert all(is_future(i) or isinstance(i, _NullFuture) for i in children) + unfinished_children = set(children) + + future = _create_future() + if not children: + future_set_result_unless_cancelled(future, + {} if keys is not None else []) + + def callback(f): + unfinished_children.remove(f) + if not unfinished_children: + result_list = [] + for f in children: + try: + result_list.append(f.result()) + except Exception as e: + if future.done(): + if not isinstance(e, quiet_exceptions): + app_log.error("Multiple exceptions in yield list", + exc_info=True) + else: + future_set_exc_info(future, sys.exc_info()) + if not future.done(): + if keys is not None: + future_set_result_unless_cancelled(future, + dict(zip(keys, result_list))) + else: + future_set_result_unless_cancelled(future, result_list) + + listening = set() + for f in children: + if f not in listening: + listening.add(f) + future_add_done_callback(f, callback) + return future + + +def maybe_future(x): + """Converts ``x`` into a `.Future`. + + If ``x`` is already a `.Future`, it is simply returned; otherwise + it is wrapped in a new `.Future`. This is suitable for use as + ``result = yield gen.maybe_future(f())`` when you don't know whether + ``f()`` returns a `.Future` or not. + + .. deprecated:: 4.3 + This function only handles ``Futures``, not other yieldable objects. + Instead of `maybe_future`, check for the non-future result types + you expect (often just ``None``), and ``yield`` anything unknown. 
+ """ + if is_future(x): + return x + else: + fut = _create_future() + fut.set_result(x) + return fut + + +def with_timeout(timeout, future, quiet_exceptions=()): + """Wraps a `.Future` (or other yieldable object) in a timeout. + + Raises `tornado.util.TimeoutError` if the input future does not + complete before ``timeout``, which may be specified in any form + allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or + an absolute time relative to `.IOLoop.time`) + + If the wrapped `.Future` fails after it has timed out, the exception + will be logged unless it is of a type contained in ``quiet_exceptions`` + (which may be an exception type or a sequence of types). + + Does not support `YieldPoint` subclasses. + + The wrapped `.Future` is not canceled when the timeout expires, + permitting it to be reused. `asyncio.wait_for` is similar to this + function but it does cancel the wrapped `.Future` on timeout. + + .. versionadded:: 4.0 + + .. versionchanged:: 4.1 + Added the ``quiet_exceptions`` argument and the logging of unhandled + exceptions. + + .. versionchanged:: 4.4 + Added support for yieldable objects other than `.Future`. + + """ + # TODO: allow YieldPoints in addition to other yieldables? + # Tricky to do with stack_context semantics. + # + # It's tempting to optimize this by cancelling the input future on timeout + # instead of creating a new one, but A) we can't know if we are the only + # one waiting on the input future, so cancelling it might disrupt other + # callers and B) concurrent futures can only be cancelled while they are + # in the queue, so cancellation cannot reliably bound our waiting time. + future = convert_yielded(future) + result = _create_future() + chain_future(future, result) + io_loop = IOLoop.current() + + def error_callback(future): + try: + future.result() + except Exception as e: + if not isinstance(e, quiet_exceptions): + app_log.error("Exception in Future %r after timeout", + future, exc_info=True) + + def timeout_callback(): + if not result.done(): + result.set_exception(TimeoutError("Timeout")) + # In case the wrapped future goes on to fail, log it. + future_add_done_callback(future, error_callback) + timeout_handle = io_loop.add_timeout( + timeout, timeout_callback) + if isinstance(future, Future): + # We know this future will resolve on the IOLoop, so we don't + # need the extra thread-safety of IOLoop.add_future (and we also + # don't care about StackContext here. + future_add_done_callback( + future, lambda future: io_loop.remove_timeout(timeout_handle)) + else: + # concurrent.futures.Futures may resolve on any thread, so we + # need to route them back to the IOLoop. + io_loop.add_future( + future, lambda future: io_loop.remove_timeout(timeout_handle)) + return result + + +def sleep(duration): + """Return a `.Future` that resolves after the given number of seconds. + + When used with ``yield`` in a coroutine, this is a non-blocking + analogue to `time.sleep` (which should not be used in coroutines + because it is blocking):: + + yield gen.sleep(0.5) + + Note that calling this function on its own does nothing; you must + wait on the `.Future` it returns (usually by yielding it). + + .. versionadded:: 4.1 + """ + f = _create_future() + IOLoop.current().call_later(duration, + lambda: future_set_result_unless_cancelled(f, None)) + return f + + +class _NullFuture(object): + """_NullFuture resembles a Future that finished with a result of None. + + It's not actually a `Future` to avoid depending on a particular event loop. 
+ Handled as a special case in the coroutine runner. + """ + def result(self): + return None + + def done(self): + return True + + +# _null_future is used as a dummy value in the coroutine runner. It differs +# from moment in that moment always adds a delay of one IOLoop iteration +# while _null_future is processed as soon as possible. +_null_future = _NullFuture() + +moment = _NullFuture() +moment.__doc__ = \ + """A special object which may be yielded to allow the IOLoop to run for +one iteration. + +This is not needed in normal use but it can be helpful in long-running +coroutines that are likely to yield Futures that are ready instantly. + +Usage: ``yield gen.moment`` + +.. versionadded:: 4.0 + +.. deprecated:: 4.5 + ``yield None`` (or ``yield`` with no argument) is now equivalent to + ``yield gen.moment``. +""" + + +class Runner(object): + """Internal implementation of `tornado.gen.engine`. + + Maintains information about pending callbacks and their results. + + The results of the generator are stored in ``result_future`` (a + `.Future`) + """ + def __init__(self, gen, result_future, first_yielded): + self.gen = gen + self.result_future = result_future + self.future = _null_future + self.yield_point = None + self.pending_callbacks = None + self.results = None + self.running = False + self.finished = False + self.had_exception = False + self.io_loop = IOLoop.current() + # For efficiency, we do not create a stack context until we + # reach a YieldPoint (stack contexts are required for the historical + # semantics of YieldPoints, but not for Futures). When we have + # done so, this field will be set and must be called at the end + # of the coroutine. + self.stack_context_deactivate = None + if self.handle_yield(first_yielded): + gen = result_future = first_yielded = None + self.run() + + def register_callback(self, key): + """Adds ``key`` to the list of callbacks.""" + if self.pending_callbacks is None: + # Lazily initialize the old-style YieldPoint data structures. + self.pending_callbacks = set() + self.results = {} + if key in self.pending_callbacks: + raise KeyReuseError("key %r is already pending" % (key,)) + self.pending_callbacks.add(key) + + def is_ready(self, key): + """Returns true if a result is available for ``key``.""" + if self.pending_callbacks is None or key not in self.pending_callbacks: + raise UnknownKeyError("key %r is not pending" % (key,)) + return key in self.results + + def set_result(self, key, result): + """Sets the result for ``key`` and attempts to resume the generator.""" + self.results[key] = result + if self.yield_point is not None and self.yield_point.is_ready(): + try: + future_set_result_unless_cancelled(self.future, + self.yield_point.get_result()) + except: + future_set_exc_info(self.future, sys.exc_info()) + self.yield_point = None + self.run() + + def pop_result(self, key): + """Returns the result for ``key`` and unregisters it.""" + self.pending_callbacks.remove(key) + return self.results.pop(key) + + def run(self): + """Starts or resumes the generator, running until it reaches a + yield point that is not ready. 
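A sketch of ``moment`` (defined above) in a long-running coroutine; the loop bounds are illustrative::

    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def busy_loop():
        for i in range(3):
            # give other IOLoop callbacks a chance to run
            yield gen.moment

    IOLoop.current().run_sync(busy_loop)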
+ """ + if self.running or self.finished: + return + try: + self.running = True + while True: + future = self.future + if not future.done(): + return + self.future = None + try: + orig_stack_contexts = stack_context._state.contexts + exc_info = None + + try: + value = future.result() + except Exception: + self.had_exception = True + exc_info = sys.exc_info() + future = None + + if exc_info is not None: + try: + yielded = self.gen.throw(*exc_info) + finally: + # Break up a reference to itself + # for faster GC on CPython. + exc_info = None + else: + yielded = self.gen.send(value) + + if stack_context._state.contexts is not orig_stack_contexts: + self.gen.throw( + stack_context.StackContextInconsistentError( + 'stack_context inconsistency (probably caused ' + 'by yield within a "with StackContext" block)')) + except (StopIteration, Return) as e: + self.finished = True + self.future = _null_future + if self.pending_callbacks and not self.had_exception: + # If we ran cleanly without waiting on all callbacks + # raise an error (really more of a warning). If we + # had an exception then some callbacks may have been + # orphaned, so skip the check in that case. + raise LeakedCallbackError( + "finished without waiting for callbacks %r" % + self.pending_callbacks) + future_set_result_unless_cancelled(self.result_future, + _value_from_stopiteration(e)) + self.result_future = None + self._deactivate_stack_context() + return + except Exception: + self.finished = True + self.future = _null_future + future_set_exc_info(self.result_future, sys.exc_info()) + self.result_future = None + self._deactivate_stack_context() + return + if not self.handle_yield(yielded): + return + yielded = None + finally: + self.running = False + + def handle_yield(self, yielded): + # Lists containing YieldPoints require stack contexts; + # other lists are handled in convert_yielded. + if _contains_yieldpoint(yielded): + yielded = multi(yielded) + + if isinstance(yielded, YieldPoint): + # YieldPoints are too closely coupled to the Runner to go + # through the generic convert_yielded mechanism. + self.future = Future() + + def start_yield_point(): + try: + yielded.start(self) + if yielded.is_ready(): + future_set_result_unless_cancelled(self.future, yielded.get_result()) + else: + self.yield_point = yielded + except Exception: + self.future = Future() + future_set_exc_info(self.future, sys.exc_info()) + + if self.stack_context_deactivate is None: + # Start a stack context if this is the first + # YieldPoint we've seen. + with stack_context.ExceptionStackContext( + self.handle_exception) as deactivate: + self.stack_context_deactivate = deactivate + + def cb(): + start_yield_point() + self.run() + self.io_loop.add_callback(cb) + return False + else: + start_yield_point() + else: + try: + self.future = convert_yielded(yielded) + except BadYieldError: + self.future = Future() + future_set_exc_info(self.future, sys.exc_info()) + + if self.future is moment: + self.io_loop.add_callback(self.run) + return False + elif not self.future.done(): + def inner(f): + # Break a reference cycle to speed GC. 
+ f = None # noqa + self.run() + self.io_loop.add_future( + self.future, inner) + return False + return True + + def result_callback(self, key): + return stack_context.wrap(_argument_adapter( + functools.partial(self.set_result, key))) + + def handle_exception(self, typ, value, tb): + if not self.running and not self.finished: + self.future = Future() + future_set_exc_info(self.future, (typ, value, tb)) + self.run() + return True + else: + return False + + def _deactivate_stack_context(self): + if self.stack_context_deactivate is not None: + self.stack_context_deactivate() + self.stack_context_deactivate = None + + +Arguments = collections.namedtuple('Arguments', ['args', 'kwargs']) + + +def _argument_adapter(callback): + """Returns a function that when invoked runs ``callback`` with one arg. + + If the function returned by this function is called with exactly + one argument, that argument is passed to ``callback``. Otherwise + the args tuple and kwargs dict are wrapped in an `Arguments` object. + """ + def wrapper(*args, **kwargs): + if kwargs or len(args) > 1: + callback(Arguments(args, kwargs)) + elif args: + callback(args[0]) + else: + callback(None) + return wrapper + + +# Convert Awaitables into Futures. +try: + import asyncio +except ImportError: + # Py2-compatible version for use with Cython. + # Copied from PEP 380. + @coroutine + def _wrap_awaitable(x): + if hasattr(x, '__await__'): + _i = x.__await__() + else: + _i = iter(x) + try: + _y = next(_i) + except StopIteration as _e: + _r = _value_from_stopiteration(_e) + else: + while 1: + try: + _s = yield _y + except GeneratorExit as _e: + try: + _m = _i.close + except AttributeError: + pass + else: + _m() + raise _e + except BaseException as _e: + _x = sys.exc_info() + try: + _m = _i.throw + except AttributeError: + raise _e + else: + try: + _y = _m(*_x) + except StopIteration as _e: + _r = _value_from_stopiteration(_e) + break + else: + try: + if _s is None: + _y = next(_i) + else: + _y = _i.send(_s) + except StopIteration as _e: + _r = _value_from_stopiteration(_e) + break + raise Return(_r) +else: + try: + _wrap_awaitable = asyncio.ensure_future + except AttributeError: + # asyncio.ensure_future was introduced in Python 3.4.4, but + # Debian jessie still ships with 3.4.2 so try the old name. + _wrap_awaitable = getattr(asyncio, 'async') + + +def convert_yielded(yielded): + """Convert a yielded object into a `.Future`. + + The default implementation accepts lists, dictionaries, and Futures. + + If the `~functools.singledispatch` library is available, this function + may be extended to support additional types. For example:: + + @convert_yielded.register(asyncio.Future) + def _(asyncio_future): + return tornado.platform.asyncio.to_tornado_future(asyncio_future) + + .. versionadded:: 4.1 + """ + # Lists and dicts containing YieldPoints were handled earlier. 
+ if yielded is None or yielded is moment: + return moment + elif yielded is _null_future: + return _null_future + elif isinstance(yielded, (list, dict)): + return multi(yielded) + elif is_future(yielded): + return yielded + elif isawaitable(yielded): + return _wrap_awaitable(yielded) + else: + raise BadYieldError("yielded unknown object %r" % (yielded,)) + + +if singledispatch is not None: + convert_yielded = singledispatch(convert_yielded) diff --git a/server/www/packages/packages-windows/x86/tornado/http1connection.py b/server/www/packages/packages-windows/x86/tornado/http1connection.py new file mode 100644 index 0000000..6cc4071 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/http1connection.py @@ -0,0 +1,751 @@ +# +# Copyright 2014 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Client and server implementations of HTTP/1.x. + +.. versionadded:: 4.0 +""" + +from __future__ import absolute_import, division, print_function + +import re +import warnings + +from tornado.concurrent import (Future, future_add_done_callback, + future_set_result_unless_cancelled) +from tornado.escape import native_str, utf8 +from tornado import gen +from tornado import httputil +from tornado import iostream +from tornado.log import gen_log, app_log +from tornado import stack_context +from tornado.util import GzipDecompressor, PY3 + + +class _QuietException(Exception): + def __init__(self): + pass + + +class _ExceptionLoggingContext(object): + """Used with the ``with`` statement when calling delegate methods to + log any exceptions with the given logger. Any exceptions caught are + converted to _QuietException + """ + def __init__(self, logger): + self.logger = logger + + def __enter__(self): + pass + + def __exit__(self, typ, value, tb): + if value is not None: + self.logger.error("Uncaught exception", exc_info=(typ, value, tb)) + raise _QuietException + + +class HTTP1ConnectionParameters(object): + """Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`. + """ + def __init__(self, no_keep_alive=False, chunk_size=None, + max_header_size=None, header_timeout=None, max_body_size=None, + body_timeout=None, decompress=False): + """ + :arg bool no_keep_alive: If true, always close the connection after + one request. 
+ :arg int chunk_size: how much data to read into memory at once
+ :arg int max_header_size: maximum amount of data for HTTP headers
+ :arg float header_timeout: how long to wait for all headers (seconds)
+ :arg int max_body_size: maximum amount of data for body
+ :arg float body_timeout: how long to wait while reading body (seconds)
+ :arg bool decompress: if true, decode incoming
+ ``Content-Encoding: gzip``
+ """
+ self.no_keep_alive = no_keep_alive
+ self.chunk_size = chunk_size or 65536
+ self.max_header_size = max_header_size or 65536
+ self.header_timeout = header_timeout
+ self.max_body_size = max_body_size
+ self.body_timeout = body_timeout
+ self.decompress = decompress
+
+
+class HTTP1Connection(httputil.HTTPConnection):
+ """Implements the HTTP/1.x protocol.
+
+ This class can be used on its own for clients, or via
+ `HTTP1ServerConnection` for servers.
+ """
+ def __init__(self, stream, is_client, params=None, context=None):
+ """
+ :arg stream: an `.IOStream`
+ :arg bool is_client: client or server
+ :arg params: a `.HTTP1ConnectionParameters` instance or ``None``
+ :arg context: an opaque application-defined object that can be accessed
+ as ``connection.context``.
+ """
+ self.is_client = is_client
+ self.stream = stream
+ if params is None:
+ params = HTTP1ConnectionParameters()
+ self.params = params
+ self.context = context
+ self.no_keep_alive = params.no_keep_alive
+ # The body limits can be altered by the delegate, so save them
+ # here instead of just referencing self.params later.
+ self._max_body_size = (self.params.max_body_size or
+ self.stream.max_buffer_size)
+ self._body_timeout = self.params.body_timeout
+ # _write_finished is set to True when finish() has been called,
+ # i.e. there will be no more data sent. Data may still be in the
+ # stream's write buffer.
+ self._write_finished = False
+ # True when we have read the entire incoming body.
+ self._read_finished = False
+ # _finish_future resolves when all data has been written and flushed
+ # to the IOStream.
+ self._finish_future = Future()
+ # If true, the connection should be closed after this request
+ # (after the response has been written on the server side,
+ # and after it has been read on the client side)
+ self._disconnect_on_finish = False
+ self._clear_callbacks()
+ # Save the start lines after we read or write them; they
+ # affect later processing (e.g. 304 responses and HEAD methods
+ # have content-length but no bodies)
+ self._request_start_line = None
+ self._response_start_line = None
+ self._request_headers = None
+ # True if we are writing output with chunked encoding.
+ self._chunking_output = None
+ # While reading a body with a content-length, this is the
+ # amount left to read.
+ self._expected_content_remaining = None
+ # A Future for our outgoing writes, returned by IOStream.write.
+ self._pending_write = None
+
+ def read_response(self, delegate):
+ """Read a single HTTP response.
+
+ Typical client-mode usage is to write a request using `write_headers`,
+ `write`, and `finish`, and then call ``read_response``.
+
+ :arg delegate: a `.HTTPMessageDelegate`
+
+ Returns a `.Future` that resolves to None after the full response has
+ been read.
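+
+ A minimal client-side sketch (illustrative only; assumes ``stream`` is a
+ connected `.IOStream` and ``delegate`` is a `.HTTPMessageDelegate` that
+ collects the response)::
+
+ conn = HTTP1Connection(stream, is_client=True)
+ conn.write_headers(
+ httputil.RequestStartLine("GET", "/", "HTTP/1.1"),
+ httputil.HTTPHeaders({"Host": "example.com"}))
+ conn.finish()
+ yield conn.read_response(delegate)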
+ """ + if self.params.decompress: + delegate = _GzipMessageDelegate(delegate, self.params.chunk_size) + return self._read_message(delegate) + + @gen.coroutine + def _read_message(self, delegate): + need_delegate_close = False + try: + header_future = self.stream.read_until_regex( + b"\r?\n\r?\n", + max_bytes=self.params.max_header_size) + if self.params.header_timeout is None: + header_data = yield header_future + else: + try: + header_data = yield gen.with_timeout( + self.stream.io_loop.time() + self.params.header_timeout, + header_future, + quiet_exceptions=iostream.StreamClosedError) + except gen.TimeoutError: + self.close() + raise gen.Return(False) + start_line, headers = self._parse_headers(header_data) + if self.is_client: + start_line = httputil.parse_response_start_line(start_line) + self._response_start_line = start_line + else: + start_line = httputil.parse_request_start_line(start_line) + self._request_start_line = start_line + self._request_headers = headers + + self._disconnect_on_finish = not self._can_keep_alive( + start_line, headers) + need_delegate_close = True + with _ExceptionLoggingContext(app_log): + header_future = delegate.headers_received(start_line, headers) + if header_future is not None: + yield header_future + if self.stream is None: + # We've been detached. + need_delegate_close = False + raise gen.Return(False) + skip_body = False + if self.is_client: + if (self._request_start_line is not None and + self._request_start_line.method == 'HEAD'): + skip_body = True + code = start_line.code + if code == 304: + # 304 responses may include the content-length header + # but do not actually have a body. + # http://tools.ietf.org/html/rfc7230#section-3.3 + skip_body = True + if code >= 100 and code < 200: + # 1xx responses should never indicate the presence of + # a body. + if ('Content-Length' in headers or + 'Transfer-Encoding' in headers): + raise httputil.HTTPInputError( + "Response code %d cannot have body" % code) + # TODO: client delegates will get headers_received twice + # in the case of a 100-continue. Document or change? 
+ yield self._read_message(delegate)
+ else:
+ if (headers.get("Expect") == "100-continue" and
+ not self._write_finished):
+ self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
+ if not skip_body:
+ body_future = self._read_body(
+ start_line.code if self.is_client else 0, headers, delegate)
+ if body_future is not None:
+ if self._body_timeout is None:
+ yield body_future
+ else:
+ try:
+ yield gen.with_timeout(
+ self.stream.io_loop.time() + self._body_timeout,
+ body_future,
+ quiet_exceptions=iostream.StreamClosedError)
+ except gen.TimeoutError:
+ gen_log.info("Timeout reading body from %s",
+ self.context)
+ self.stream.close()
+ raise gen.Return(False)
+ self._read_finished = True
+ if not self._write_finished or self.is_client:
+ need_delegate_close = False
+ with _ExceptionLoggingContext(app_log):
+ delegate.finish()
+ # If we're waiting for the application to produce an asynchronous
+ # response, and we're not detached, register a close callback
+ # on the stream (we didn't need one while we were reading)
+ if (not self._finish_future.done() and
+ self.stream is not None and
+ not self.stream.closed()):
+ self.stream.set_close_callback(self._on_connection_close)
+ yield self._finish_future
+ if self.is_client and self._disconnect_on_finish:
+ self.close()
+ if self.stream is None:
+ raise gen.Return(False)
+ except httputil.HTTPInputError as e:
+ gen_log.info("Malformed HTTP message from %s: %s",
+ self.context, e)
+ if not self.is_client:
+ yield self.stream.write(b'HTTP/1.1 400 Bad Request\r\n\r\n')
+ self.close()
+ raise gen.Return(False)
+ finally:
+ if need_delegate_close:
+ with _ExceptionLoggingContext(app_log):
+ delegate.on_connection_close()
+ header_future = None
+ self._clear_callbacks()
+ raise gen.Return(True)
+
+ def _clear_callbacks(self):
+ """Clears the callback attributes.
+
+ This allows the request handler to be garbage collected more
+ quickly in CPython by breaking up reference cycles.
+ """
+ self._write_callback = None
+ self._write_future = None
+ self._close_callback = None
+ if self.stream is not None:
+ self.stream.set_close_callback(None)
+
+ def set_close_callback(self, callback):
+ """Sets a callback that will be run when the connection is closed.
+
+ Note that this callback is slightly different from
+ `.HTTPMessageDelegate.on_connection_close`: The
+ `.HTTPMessageDelegate` method is called when the connection is
+ closed while receiving a message. This callback is used when
+ there is not an active delegate (for example, on the server
+ side this callback is used if the client closes the connection
+ after sending its request but before receiving all the
+ response).
+ """
+ self._close_callback = stack_context.wrap(callback)
+
+ def _on_connection_close(self):
+ # Note that this callback is only registered on the IOStream
+ # when we have finished reading the request and are waiting for
+ # the application to produce its response.
+ if self._close_callback is not None:
+ callback = self._close_callback
+ self._close_callback = None
+ callback()
+ if not self._finish_future.done():
+ future_set_result_unless_cancelled(self._finish_future, None)
+ self._clear_callbacks()
+
+ def close(self):
+ if self.stream is not None:
+ self.stream.close()
+ self._clear_callbacks()
+ if not self._finish_future.done():
+ future_set_result_unless_cancelled(self._finish_future, None)
+
+ def detach(self):
+ """Take control of the underlying stream.
+
+ Returns the underlying `.IOStream` object and stops all further
+ HTTP processing.
May only be called during + `.HTTPMessageDelegate.headers_received`. Intended for implementing + protocols like websockets that tunnel over an HTTP handshake. + """ + self._clear_callbacks() + stream = self.stream + self.stream = None + if not self._finish_future.done(): + future_set_result_unless_cancelled(self._finish_future, None) + return stream + + def set_body_timeout(self, timeout): + """Sets the body timeout for a single request. + + Overrides the value from `.HTTP1ConnectionParameters`. + """ + self._body_timeout = timeout + + def set_max_body_size(self, max_body_size): + """Sets the body size limit for a single request. + + Overrides the value from `.HTTP1ConnectionParameters`. + """ + self._max_body_size = max_body_size + + def write_headers(self, start_line, headers, chunk=None, callback=None): + """Implements `.HTTPConnection.write_headers`.""" + lines = [] + if self.is_client: + self._request_start_line = start_line + lines.append(utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1]))) + # Client requests with a non-empty body must have either a + # Content-Length or a Transfer-Encoding. + self._chunking_output = ( + start_line.method in ('POST', 'PUT', 'PATCH') and + 'Content-Length' not in headers and + 'Transfer-Encoding' not in headers) + else: + self._response_start_line = start_line + lines.append(utf8('HTTP/1.1 %d %s' % (start_line[1], start_line[2]))) + self._chunking_output = ( + # TODO: should this use + # self._request_start_line.version or + # start_line.version? + self._request_start_line.version == 'HTTP/1.1' and + # 1xx, 204 and 304 responses have no body (not even a zero-length + # body), and so should not have either Content-Length or + # Transfer-Encoding headers. + start_line.code not in (204, 304) and + (start_line.code < 100 or start_line.code >= 200) and + # No need to chunk the output if a Content-Length is specified. + 'Content-Length' not in headers and + # Applications are discouraged from touching Transfer-Encoding, + # but if they do, leave it alone. + 'Transfer-Encoding' not in headers) + # If connection to a 1.1 client will be closed, inform client + if (self._request_start_line.version == 'HTTP/1.1' and self._disconnect_on_finish): + headers['Connection'] = 'close' + # If a 1.0 client asked for keep-alive, add the header. + if (self._request_start_line.version == 'HTTP/1.0' and + self._request_headers.get('Connection', '').lower() == 'keep-alive'): + headers['Connection'] = 'Keep-Alive' + if self._chunking_output: + headers['Transfer-Encoding'] = 'chunked' + if (not self.is_client and + (self._request_start_line.method == 'HEAD' or + start_line.code == 304)): + self._expected_content_remaining = 0 + elif 'Content-Length' in headers: + self._expected_content_remaining = int(headers['Content-Length']) + else: + self._expected_content_remaining = None + # TODO: headers are supposed to be of type str, but we still have some + # cases that let bytes slip through. Remove these native_str calls when those + # are fixed. 
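+ # For reference (editorial note): each header is serialized as
+ # "Name: value" and joined with CRLF below, so a minimal request
+ # becomes e.g. b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n".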
+ header_lines = (native_str(n) + ": " + native_str(v) for n, v in headers.get_all()) + if PY3: + lines.extend(l.encode('latin1') for l in header_lines) + else: + lines.extend(header_lines) + for line in lines: + if b'\n' in line: + raise ValueError('Newline in header: ' + repr(line)) + future = None + if self.stream.closed(): + future = self._write_future = Future() + future.set_exception(iostream.StreamClosedError()) + future.exception() + else: + if callback is not None: + warnings.warn("callback argument is deprecated, use returned Future instead", + DeprecationWarning) + self._write_callback = stack_context.wrap(callback) + else: + future = self._write_future = Future() + data = b"\r\n".join(lines) + b"\r\n\r\n" + if chunk: + data += self._format_chunk(chunk) + self._pending_write = self.stream.write(data) + future_add_done_callback(self._pending_write, self._on_write_complete) + return future + + def _format_chunk(self, chunk): + if self._expected_content_remaining is not None: + self._expected_content_remaining -= len(chunk) + if self._expected_content_remaining < 0: + # Close the stream now to stop further framing errors. + self.stream.close() + raise httputil.HTTPOutputError( + "Tried to write more data than Content-Length") + if self._chunking_output and chunk: + # Don't write out empty chunks because that means END-OF-STREAM + # with chunked encoding + return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n" + else: + return chunk + + def write(self, chunk, callback=None): + """Implements `.HTTPConnection.write`. + + For backwards compatibility it is allowed but deprecated to + skip `write_headers` and instead call `write()` with a + pre-encoded header block. + """ + future = None + if self.stream.closed(): + future = self._write_future = Future() + self._write_future.set_exception(iostream.StreamClosedError()) + self._write_future.exception() + else: + if callback is not None: + warnings.warn("callback argument is deprecated, use returned Future instead", + DeprecationWarning) + self._write_callback = stack_context.wrap(callback) + else: + future = self._write_future = Future() + self._pending_write = self.stream.write(self._format_chunk(chunk)) + self._pending_write.add_done_callback(self._on_write_complete) + return future + + def finish(self): + """Implements `.HTTPConnection.finish`.""" + if (self._expected_content_remaining is not None and + self._expected_content_remaining != 0 and + not self.stream.closed()): + self.stream.close() + raise httputil.HTTPOutputError( + "Tried to write %d bytes less than Content-Length" % + self._expected_content_remaining) + if self._chunking_output: + if not self.stream.closed(): + self._pending_write = self.stream.write(b"0\r\n\r\n") + self._pending_write.add_done_callback(self._on_write_complete) + self._write_finished = True + # If the app finished the request while we're still reading, + # divert any remaining data away from the delegate and + # close the connection when we're done sending our response. + # Closing the connection is the only way to avoid reading the + # whole input body. + if not self._read_finished: + self._disconnect_on_finish = True + # No more data is coming, so instruct TCP to send any remaining + # data immediately instead of waiting for a full packet or ack. 
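+ # (i.e. disable Nagle's algorithm for the tail of the response;
+ # _finish_request turns it back on for the next request.)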
+ self.stream.set_nodelay(True)
+ if self._pending_write is None:
+ self._finish_request(None)
+ else:
+ future_add_done_callback(self._pending_write, self._finish_request)
+
+ def _on_write_complete(self, future):
+ exc = future.exception()
+ if exc is not None and not isinstance(exc, iostream.StreamClosedError):
+ future.result()
+ if self._write_callback is not None:
+ callback = self._write_callback
+ self._write_callback = None
+ self.stream.io_loop.add_callback(callback)
+ if self._write_future is not None:
+ future = self._write_future
+ self._write_future = None
+ future_set_result_unless_cancelled(future, None)
+
+ def _can_keep_alive(self, start_line, headers):
+ if self.params.no_keep_alive:
+ return False
+ connection_header = headers.get("Connection")
+ if connection_header is not None:
+ connection_header = connection_header.lower()
+ if start_line.version == "HTTP/1.1":
+ return connection_header != "close"
+ elif ("Content-Length" in headers or
+ headers.get("Transfer-Encoding", "").lower() == "chunked" or
+ getattr(start_line, 'method', None) in ("HEAD", "GET")):
+ # start_line may be a request or response start line; only
+ # the former has a method attribute.
+ return connection_header == "keep-alive"
+ return False
+
+ def _finish_request(self, future):
+ self._clear_callbacks()
+ if not self.is_client and self._disconnect_on_finish:
+ self.close()
+ return
+ # Turn Nagle's algorithm back on, leaving the stream in its
+ # default state for the next request.
+ self.stream.set_nodelay(False)
+ if not self._finish_future.done():
+ future_set_result_unless_cancelled(self._finish_future, None)
+
+ def _parse_headers(self, data):
+ # The lstrip removes newlines that some implementations sometimes
+ # insert between messages of a reused connection. Per RFC 7230,
+ # we SHOULD ignore at least one empty line before the request.
+ # http://tools.ietf.org/html/rfc7230#section-3.5
+ data = native_str(data.decode('latin1')).lstrip("\r\n")
+ # RFC 7230 allows for both CRLF and bare LF.
+ eol = data.find("\n")
+ start_line = data[:eol].rstrip("\r")
+ headers = httputil.HTTPHeaders.parse(data[eol:])
+ return start_line, headers
+
+ def _read_body(self, code, headers, delegate):
+ if "Content-Length" in headers:
+ if "Transfer-Encoding" in headers:
+ # Response cannot contain both Content-Length and
+ # Transfer-Encoding headers.
+ # http://tools.ietf.org/html/rfc7230#section-3.3.3
+ raise httputil.HTTPInputError(
+ "Response with both Transfer-Encoding and Content-Length")
+ if "," in headers["Content-Length"]:
+ # Proxies sometimes cause Content-Length headers to get
+ # duplicated. If all the values are identical then we can
+ # use them but if they differ it's an error.
+ pieces = re.split(r',\s*', headers["Content-Length"])
+ if any(i != pieces[0] for i in pieces):
+ raise httputil.HTTPInputError(
+ "Multiple unequal Content-Lengths: %r" %
+ headers["Content-Length"])
+ headers["Content-Length"] = pieces[0]
+
+ try:
+ content_length = int(headers["Content-Length"])
+ except ValueError:
+ # Handles non-integer Content-Length value.
+ raise httputil.HTTPInputError(
+ "Only integer Content-Length is allowed: %s" % headers["Content-Length"])
+
+ if content_length > self._max_body_size:
+ raise httputil.HTTPInputError("Content-Length too long")
+ else:
+ content_length = None
+
+ if code == 204:
+ # This response code is not allowed to have a non-empty body,
+ # and has an implicit length of zero instead of read-until-close.
+ # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3 + if ("Transfer-Encoding" in headers or + content_length not in (None, 0)): + raise httputil.HTTPInputError( + "Response with code %d should not have body" % code) + content_length = 0 + + if content_length is not None: + return self._read_fixed_body(content_length, delegate) + if headers.get("Transfer-Encoding", "").lower() == "chunked": + return self._read_chunked_body(delegate) + if self.is_client: + return self._read_body_until_close(delegate) + return None + + @gen.coroutine + def _read_fixed_body(self, content_length, delegate): + while content_length > 0: + body = yield self.stream.read_bytes( + min(self.params.chunk_size, content_length), partial=True) + content_length -= len(body) + if not self._write_finished or self.is_client: + with _ExceptionLoggingContext(app_log): + ret = delegate.data_received(body) + if ret is not None: + yield ret + + @gen.coroutine + def _read_chunked_body(self, delegate): + # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1 + total_size = 0 + while True: + chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64) + chunk_len = int(chunk_len.strip(), 16) + if chunk_len == 0: + crlf = yield self.stream.read_bytes(2) + if crlf != b'\r\n': + raise httputil.HTTPInputError("improperly terminated chunked request") + return + total_size += chunk_len + if total_size > self._max_body_size: + raise httputil.HTTPInputError("chunked body too large") + bytes_to_read = chunk_len + while bytes_to_read: + chunk = yield self.stream.read_bytes( + min(bytes_to_read, self.params.chunk_size), partial=True) + bytes_to_read -= len(chunk) + if not self._write_finished or self.is_client: + with _ExceptionLoggingContext(app_log): + ret = delegate.data_received(chunk) + if ret is not None: + yield ret + # chunk ends with \r\n + crlf = yield self.stream.read_bytes(2) + assert crlf == b"\r\n" + + @gen.coroutine + def _read_body_until_close(self, delegate): + body = yield self.stream.read_until_close() + if not self._write_finished or self.is_client: + with _ExceptionLoggingContext(app_log): + delegate.data_received(body) + + +class _GzipMessageDelegate(httputil.HTTPMessageDelegate): + """Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``. + """ + def __init__(self, delegate, chunk_size): + self._delegate = delegate + self._chunk_size = chunk_size + self._decompressor = None + + def headers_received(self, start_line, headers): + if headers.get("Content-Encoding") == "gzip": + self._decompressor = GzipDecompressor() + # Downstream delegates will only see uncompressed data, + # so rename the content-encoding header. + # (but note that curl_httpclient doesn't do this). + headers.add("X-Consumed-Content-Encoding", + headers["Content-Encoding"]) + del headers["Content-Encoding"] + return self._delegate.headers_received(start_line, headers) + + @gen.coroutine + def data_received(self, chunk): + if self._decompressor: + compressed_data = chunk + while compressed_data: + decompressed = self._decompressor.decompress( + compressed_data, self._chunk_size) + if decompressed: + ret = self._delegate.data_received(decompressed) + if ret is not None: + yield ret + compressed_data = self._decompressor.unconsumed_tail + else: + ret = self._delegate.data_received(chunk) + if ret is not None: + yield ret + + def finish(self): + if self._decompressor is not None: + tail = self._decompressor.flush() + if tail: + # I believe the tail will always be empty (i.e. 
+ # decompress will return all it can). The purpose + # of the flush call is to detect errors such + # as truncated input. But in case it ever returns + # anything, treat it as an extra chunk + self._delegate.data_received(tail) + return self._delegate.finish() + + def on_connection_close(self): + return self._delegate.on_connection_close() + + +class HTTP1ServerConnection(object): + """An HTTP/1.x server.""" + def __init__(self, stream, params=None, context=None): + """ + :arg stream: an `.IOStream` + :arg params: a `.HTTP1ConnectionParameters` or None + :arg context: an opaque application-defined object that is accessible + as ``connection.context`` + """ + self.stream = stream + if params is None: + params = HTTP1ConnectionParameters() + self.params = params + self.context = context + self._serving_future = None + + @gen.coroutine + def close(self): + """Closes the connection. + + Returns a `.Future` that resolves after the serving loop has exited. + """ + self.stream.close() + # Block until the serving loop is done, but ignore any exceptions + # (start_serving is already responsible for logging them). + try: + yield self._serving_future + except Exception: + pass + + def start_serving(self, delegate): + """Starts serving requests on this connection. + + :arg delegate: a `.HTTPServerConnectionDelegate` + """ + assert isinstance(delegate, httputil.HTTPServerConnectionDelegate) + self._serving_future = self._server_request_loop(delegate) + # Register the future on the IOLoop so its errors get logged. + self.stream.io_loop.add_future(self._serving_future, + lambda f: f.result()) + + @gen.coroutine + def _server_request_loop(self, delegate): + try: + while True: + conn = HTTP1Connection(self.stream, False, + self.params, self.context) + request_delegate = delegate.start_request(self, conn) + try: + ret = yield conn.read_response(request_delegate) + except (iostream.StreamClosedError, + iostream.UnsatisfiableReadError): + return + except _QuietException: + # This exception was already logged. + conn.close() + return + except Exception: + gen_log.error("Uncaught exception", exc_info=True) + conn.close() + return + if not ret: + return + yield gen.moment + finally: + delegate.on_close(self) diff --git a/server/www/packages/packages-windows/x86/tornado/httpclient.py b/server/www/packages/packages-windows/x86/tornado/httpclient.py new file mode 100644 index 0000000..5ed2ee6 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/httpclient.py @@ -0,0 +1,748 @@ +"""Blocking and non-blocking HTTP client interfaces. + +This module defines a common interface shared by two implementations, +``simple_httpclient`` and ``curl_httpclient``. Applications may either +instantiate their chosen implementation class directly or use the +`AsyncHTTPClient` class from this module, which selects an implementation +that can be overridden with the `AsyncHTTPClient.configure` method. + +The default implementation is ``simple_httpclient``, and this is expected +to be suitable for most users' needs. However, some applications may wish +to switch to ``curl_httpclient`` for reasons such as the following: + +* ``curl_httpclient`` has some features not found in ``simple_httpclient``, + including support for HTTP proxies and the ability to use a specified + network interface. + +* ``curl_httpclient`` is more likely to be compatible with sites that are + not-quite-compliant with the HTTP spec, or sites that use little-exercised + features of HTTP. + +* ``curl_httpclient`` is faster. 
+
+* ``curl_httpclient`` was the default prior to Tornado 2.0.
+
+Note that if you are using ``curl_httpclient``, it is highly
+recommended that you use a recent version of ``libcurl`` and
+``pycurl``. Currently the minimum supported version of libcurl is
+7.22.0, and the minimum version of pycurl is 7.18.2. It is highly
+recommended that your ``libcurl`` installation is built with an
+asynchronous DNS resolver (threaded or c-ares), otherwise you may
+encounter various problems with request timeouts (for more
+information, see
+http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
+and comments in curl_httpclient.py).
+
+To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::
+
+ AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import functools
+import time
+import warnings
+import weakref
+
+from tornado.concurrent import Future, future_set_result_unless_cancelled
+from tornado.escape import utf8, native_str
+from tornado import gen, httputil, stack_context
+from tornado.ioloop import IOLoop
+from tornado.util import Configurable
+
+
+class HTTPClient(object):
+ """A blocking HTTP client.
+
+ This interface is provided to make it easier to share code between
+ synchronous and asynchronous applications. Applications that are
+ running an `.IOLoop` must use `AsyncHTTPClient` instead.
+
+ Typical usage looks like this::
+
+ http_client = httpclient.HTTPClient()
+ try:
+ response = http_client.fetch("http://www.google.com/")
+ print(response.body)
+ except httpclient.HTTPError as e:
+ # HTTPError is raised for non-200 responses; the response
+ # can be found in e.response.
+ print("Error: " + str(e))
+ except Exception as e:
+ # Other errors are possible, such as IOError.
+ print("Error: " + str(e))
+ http_client.close()
+
+ .. versionchanged:: 5.0
+
+ Due to limitations in `asyncio`, it is no longer possible to
+ use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
+ Use `AsyncHTTPClient` instead.
+
+ """
+ def __init__(self, async_client_class=None, **kwargs):
+ # Initialize self._closed at the beginning of the constructor
+ # so that an exception raised here doesn't lead to confusing
+ # failures in __del__.
+ self._closed = True
+ self._io_loop = IOLoop(make_current=False)
+ if async_client_class is None:
+ async_client_class = AsyncHTTPClient
+ # Create the client while our IOLoop is "current", without
+ # clobbering the thread's real current IOLoop (if any).
+ self._async_client = self._io_loop.run_sync(
+ gen.coroutine(lambda: async_client_class(**kwargs)))
+ self._closed = False
+
+ def __del__(self):
+ self.close()
+
+ def close(self):
+ """Closes the HTTPClient, freeing any resources used."""
+ if not self._closed:
+ self._async_client.close()
+ self._io_loop.close()
+ self._closed = True
+
+ def fetch(self, request, **kwargs):
+ """Executes a request, returning an `HTTPResponse`.
+
+ The request may be either a string URL or an `HTTPRequest` object.
+ If it is a string, we construct an `HTTPRequest` using any additional
+ kwargs: ``HTTPRequest(request, **kwargs)``
+
+ If an error occurs during the fetch, we raise an `HTTPError` unless
+ the ``raise_error`` keyword argument is set to False.
+ """
+ response = self._io_loop.run_sync(functools.partial(
+ self._async_client.fetch, request, **kwargs))
+ return response
+
+
+class AsyncHTTPClient(Configurable):
+ """A non-blocking HTTP client.
+ + Example usage:: + + async def f(): + http_client = AsyncHTTPClient() + try: + response = await http_client.fetch("http://www.google.com") + except Exception as e: + print("Error: %s" % e) + else: + print(response.body) + + The constructor for this class is magic in several respects: It + actually creates an instance of an implementation-specific + subclass, and instances are reused as a kind of pseudo-singleton + (one per `.IOLoop`). The keyword argument ``force_instance=True`` + can be used to suppress this singleton behavior. Unless + ``force_instance=True`` is used, no arguments should be passed to + the `AsyncHTTPClient` constructor. The implementation subclass as + well as arguments to its constructor can be set with the static + method `configure()` + + All `AsyncHTTPClient` implementations support a ``defaults`` + keyword argument, which can be used to set default values for + `HTTPRequest` attributes. For example:: + + AsyncHTTPClient.configure( + None, defaults=dict(user_agent="MyUserAgent")) + # or with force_instance: + client = AsyncHTTPClient(force_instance=True, + defaults=dict(user_agent="MyUserAgent")) + + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + + """ + @classmethod + def configurable_base(cls): + return AsyncHTTPClient + + @classmethod + def configurable_default(cls): + from tornado.simple_httpclient import SimpleAsyncHTTPClient + return SimpleAsyncHTTPClient + + @classmethod + def _async_clients(cls): + attr_name = '_async_client_dict_' + cls.__name__ + if not hasattr(cls, attr_name): + setattr(cls, attr_name, weakref.WeakKeyDictionary()) + return getattr(cls, attr_name) + + def __new__(cls, force_instance=False, **kwargs): + io_loop = IOLoop.current() + if force_instance: + instance_cache = None + else: + instance_cache = cls._async_clients() + if instance_cache is not None and io_loop in instance_cache: + return instance_cache[io_loop] + instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs) + # Make sure the instance knows which cache to remove itself from. + # It can't simply call _async_clients() because we may be in + # __new__(AsyncHTTPClient) but instance.__class__ may be + # SimpleAsyncHTTPClient. + instance._instance_cache = instance_cache + if instance_cache is not None: + instance_cache[instance.io_loop] = instance + return instance + + def initialize(self, defaults=None): + self.io_loop = IOLoop.current() + self.defaults = dict(HTTPRequest._DEFAULTS) + if defaults is not None: + self.defaults.update(defaults) + self._closed = False + + def close(self): + """Destroys this HTTP client, freeing any file descriptors used. + + This method is **not needed in normal use** due to the way + that `AsyncHTTPClient` objects are transparently reused. + ``close()`` is generally only necessary when either the + `.IOLoop` is also being closed, or the ``force_instance=True`` + argument was used when creating the `AsyncHTTPClient`. + + No other methods may be called on the `AsyncHTTPClient` after + ``close()``. + + """ + if self._closed: + return + self._closed = True + if self._instance_cache is not None: + if self._instance_cache.get(self.io_loop) is not self: + raise RuntimeError("inconsistent AsyncHTTPClient cache") + del self._instance_cache[self.io_loop] + + def fetch(self, request, callback=None, raise_error=True, **kwargs): + """Executes a request, asynchronously returning an `HTTPResponse`. + + The request may be either a string URL or an `HTTPRequest` object. 
+ If it is a string, we construct an `HTTPRequest` using any additional + kwargs: ``HTTPRequest(request, **kwargs)`` + + This method returns a `.Future` whose result is an + `HTTPResponse`. By default, the ``Future`` will raise an + `HTTPError` if the request returned a non-200 response code + (other errors may also be raised if the server could not be + contacted). Instead, if ``raise_error`` is set to False, the + response will always be returned regardless of the response + code. + + If a ``callback`` is given, it will be invoked with the `HTTPResponse`. + In the callback interface, `HTTPError` is not automatically raised. + Instead, you must check the response's ``error`` attribute or + call its `~HTTPResponse.rethrow` method. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in 6.0. Use the returned `.Future` instead. + + The ``raise_error=False`` argument currently suppresses + *all* errors, encapsulating them in `HTTPResponse` objects + with a 599 response code. This will change in Tornado 6.0: + ``raise_error=False`` will only affect the `HTTPError` + raised when a non-200 response code is used. + + """ + if self._closed: + raise RuntimeError("fetch() called on closed AsyncHTTPClient") + if not isinstance(request, HTTPRequest): + request = HTTPRequest(url=request, **kwargs) + else: + if kwargs: + raise ValueError("kwargs can't be used if request is an HTTPRequest object") + # We may modify this (to add Host, Accept-Encoding, etc), + # so make sure we don't modify the caller's object. This is also + # where normal dicts get converted to HTTPHeaders objects. + request.headers = httputil.HTTPHeaders(request.headers) + request = _RequestProxy(request, self.defaults) + future = Future() + if callback is not None: + warnings.warn("callback arguments are deprecated, use the returned Future instead", + DeprecationWarning) + callback = stack_context.wrap(callback) + + def handle_future(future): + exc = future.exception() + if isinstance(exc, HTTPError) and exc.response is not None: + response = exc.response + elif exc is not None: + response = HTTPResponse( + request, 599, error=exc, + request_time=time.time() - request.start_time) + else: + response = future.result() + self.io_loop.add_callback(callback, response) + future.add_done_callback(handle_future) + + def handle_response(response): + if raise_error and response.error: + if isinstance(response.error, HTTPError): + response.error.response = response + future.set_exception(response.error) + else: + if response.error and not response._error_is_response_code: + warnings.warn("raise_error=False will allow '%s' to be raised in the future" % + response.error, DeprecationWarning) + future_set_result_unless_cancelled(future, response) + self.fetch_impl(request, handle_response) + return future + + def fetch_impl(self, request, callback): + raise NotImplementedError() + + @classmethod + def configure(cls, impl, **kwargs): + """Configures the `AsyncHTTPClient` subclass to use. + + ``AsyncHTTPClient()`` actually creates an instance of a subclass. + This method may be called with either a class object or the + fully-qualified name of such a class (or ``None`` to use the default, + ``SimpleAsyncHTTPClient``) + + If additional keyword arguments are given, they will be passed + to the constructor of each subclass instance created. The + keyword argument ``max_clients`` determines the maximum number + of simultaneous `~AsyncHTTPClient.fetch()` operations that can + execute in parallel on each `.IOLoop`. 
Additional arguments + may be supported depending on the implementation class in use. + + Example:: + + AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") + """ + super(AsyncHTTPClient, cls).configure(impl, **kwargs) + + +class HTTPRequest(object): + """HTTP client request object.""" + + # Default values for HTTPRequest parameters. + # Merged with the values on the request object by AsyncHTTPClient + # implementations. + _DEFAULTS = dict( + connect_timeout=20.0, + request_timeout=20.0, + follow_redirects=True, + max_redirects=5, + decompress_response=True, + proxy_password='', + allow_nonstandard_methods=False, + validate_cert=True) + + def __init__(self, url, method="GET", headers=None, body=None, + auth_username=None, auth_password=None, auth_mode=None, + connect_timeout=None, request_timeout=None, + if_modified_since=None, follow_redirects=None, + max_redirects=None, user_agent=None, use_gzip=None, + network_interface=None, streaming_callback=None, + header_callback=None, prepare_curl_callback=None, + proxy_host=None, proxy_port=None, proxy_username=None, + proxy_password=None, proxy_auth_mode=None, + allow_nonstandard_methods=None, validate_cert=None, + ca_certs=None, allow_ipv6=None, client_key=None, + client_cert=None, body_producer=None, + expect_100_continue=False, decompress_response=None, + ssl_options=None): + r"""All parameters except ``url`` are optional. + + :arg str url: URL to fetch + :arg str method: HTTP method, e.g. "GET" or "POST" + :arg headers: Additional HTTP headers to pass on the request + :type headers: `~tornado.httputil.HTTPHeaders` or `dict` + :arg body: HTTP request body as a string (byte or unicode; if unicode + the utf-8 encoding will be used) + :arg body_producer: Callable used for lazy/asynchronous request bodies. + It is called with one argument, a ``write`` function, and should + return a `.Future`. It should call the write function with new + data as it becomes available. The write function returns a + `.Future` which can be used for flow control. + Only one of ``body`` and ``body_producer`` may + be specified. ``body_producer`` is not supported on + ``curl_httpclient``. When using ``body_producer`` it is recommended + to pass a ``Content-Length`` in the headers as otherwise chunked + encoding will be used, and many servers do not support chunked + encoding on requests. New in Tornado 4.0 + :arg str auth_username: Username for HTTP authentication + :arg str auth_password: Password for HTTP authentication + :arg str auth_mode: Authentication mode; default is "basic". + Allowed values are implementation-defined; ``curl_httpclient`` + supports "basic" and "digest"; ``simple_httpclient`` only supports + "basic" + :arg float connect_timeout: Timeout for initial connection in seconds, + default 20 seconds + :arg float request_timeout: Timeout for entire request in seconds, + default 20 seconds + :arg if_modified_since: Timestamp for ``If-Modified-Since`` header + :type if_modified_since: `datetime` or `float` + :arg bool follow_redirects: Should redirects be followed automatically + or return the 3xx response? Default True. + :arg int max_redirects: Limit for ``follow_redirects``, default 5. + :arg str user_agent: String to send as ``User-Agent`` header + :arg bool decompress_response: Request a compressed response from + the server and decompress it after downloading. Default is True. + New in Tornado 4.0. + :arg bool use_gzip: Deprecated alias for ``decompress_response`` + since Tornado 4.0. 
+ :arg str network_interface: Network interface to use for request.
+ ``curl_httpclient`` only; see note below.
+ :arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will
+ be run with each chunk of data as it is received, and
+ ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
+ the final response.
+ :arg collections.abc.Callable header_callback: If set, ``header_callback`` will
+ be run with each header line as it is received (including the
+ first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
+ containing only ``\r\n``. All lines include the trailing newline
+ characters). ``HTTPResponse.headers`` will be empty in the final
+ response. This is most useful in conjunction with
+ ``streaming_callback``, because it's the only way to get access to
+ header data while the request is in progress.
+ :arg collections.abc.Callable prepare_curl_callback: If set, will be called with
+ a ``pycurl.Curl`` object to allow the application to make additional
+ ``setopt`` calls.
+ :arg str proxy_host: HTTP proxy hostname. To use proxies,
+ ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
+ ``proxy_password`` and ``proxy_auth_mode`` are optional. Proxies are
+ currently only supported with ``curl_httpclient``.
+ :arg int proxy_port: HTTP proxy port
+ :arg str proxy_username: HTTP proxy username
+ :arg str proxy_password: HTTP proxy password
+ :arg str proxy_auth_mode: HTTP proxy authentication mode;
+ default is "basic". Supports "basic" and "digest".
+ :arg bool allow_nonstandard_methods: Allow unknown values for ``method``
+ argument? Default is False.
+ :arg bool validate_cert: For HTTPS requests, validate the server's
+ certificate? Default is True.
+ :arg str ca_certs: filename of CA certificates in PEM format,
+ or None to use defaults. See note below when used with
+ ``curl_httpclient``.
+ :arg str client_key: Filename for client SSL key, if any. See
+ note below when used with ``curl_httpclient``.
+ :arg str client_cert: Filename for client SSL certificate, if any.
+ See note below when used with ``curl_httpclient``.
+ :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
+ ``simple_httpclient`` (unsupported by ``curl_httpclient``).
+ Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
+ and ``client_cert``.
+ :arg bool allow_ipv6: Use IPv6 when available? Default is True.
+ :arg bool expect_100_continue: If true, send the
+ ``Expect: 100-continue`` header and wait for a continue response
+ before sending the request body. Only supported with
+ ``simple_httpclient``.
+
+ .. note::
+
+ When using ``curl_httpclient`` certain options may be
+ inherited by subsequent fetches because ``pycurl`` does
+ not allow them to be cleanly reset. This applies to the
+ ``ca_certs``, ``client_key``, ``client_cert``, and
+ ``network_interface`` arguments. If you use these
+ options, you should pass them on every request (you don't
+ have to always use the same values, but it's not possible
+ to mix requests that specify these options with ones that
+ use the defaults).
+
+ .. versionadded:: 3.1
+ The ``auth_mode`` argument.
+
+ .. versionadded:: 4.0
+ The ``body_producer`` and ``expect_100_continue`` arguments.
+
+ .. versionadded:: 4.2
+ The ``ssl_options`` argument.
+
+ .. versionadded:: 4.5
+ The ``proxy_auth_mode`` argument.
+ """
+ # Note that some of these attributes go through property setters
+ # defined below.
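+ # Typical construction (editorial example; URL and values are
+ # hypothetical):
+ # req = HTTPRequest("https://example.com/api", method="POST",
+ # body='{"k": "v"}', request_timeout=10.0,
+ # headers={"Content-Type": "application/json"})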
+ self.headers = headers + if if_modified_since: + self.headers["If-Modified-Since"] = httputil.format_timestamp( + if_modified_since) + self.proxy_host = proxy_host + self.proxy_port = proxy_port + self.proxy_username = proxy_username + self.proxy_password = proxy_password + self.proxy_auth_mode = proxy_auth_mode + self.url = url + self.method = method + self.body = body + self.body_producer = body_producer + self.auth_username = auth_username + self.auth_password = auth_password + self.auth_mode = auth_mode + self.connect_timeout = connect_timeout + self.request_timeout = request_timeout + self.follow_redirects = follow_redirects + self.max_redirects = max_redirects + self.user_agent = user_agent + if decompress_response is not None: + self.decompress_response = decompress_response + else: + self.decompress_response = use_gzip + self.network_interface = network_interface + self.streaming_callback = streaming_callback + self.header_callback = header_callback + self.prepare_curl_callback = prepare_curl_callback + self.allow_nonstandard_methods = allow_nonstandard_methods + self.validate_cert = validate_cert + self.ca_certs = ca_certs + self.allow_ipv6 = allow_ipv6 + self.client_key = client_key + self.client_cert = client_cert + self.ssl_options = ssl_options + self.expect_100_continue = expect_100_continue + self.start_time = time.time() + + @property + def headers(self): + return self._headers + + @headers.setter + def headers(self, value): + if value is None: + self._headers = httputil.HTTPHeaders() + else: + self._headers = value + + @property + def body(self): + return self._body + + @body.setter + def body(self, value): + self._body = utf8(value) + + @property + def body_producer(self): + return self._body_producer + + @body_producer.setter + def body_producer(self, value): + self._body_producer = stack_context.wrap(value) + + @property + def streaming_callback(self): + return self._streaming_callback + + @streaming_callback.setter + def streaming_callback(self, value): + self._streaming_callback = stack_context.wrap(value) + + @property + def header_callback(self): + return self._header_callback + + @header_callback.setter + def header_callback(self, value): + self._header_callback = stack_context.wrap(value) + + @property + def prepare_curl_callback(self): + return self._prepare_curl_callback + + @prepare_curl_callback.setter + def prepare_curl_callback(self, value): + self._prepare_curl_callback = stack_context.wrap(value) + + +class HTTPResponse(object): + """HTTP Response object. + + Attributes: + + * request: HTTPRequest object + + * code: numeric HTTP status code, e.g. 200 or 404 + + * reason: human-readable reason phrase describing the status code + + * headers: `tornado.httputil.HTTPHeaders` object + + * effective_url: final location of the resource after following any + redirects + + * buffer: ``cStringIO`` object for response body + + * body: response body as bytes (created on demand from ``self.buffer``) + + * error: Exception object, if any + + * request_time: seconds from request start to finish. Includes all network + operations from DNS resolution to receiving the last byte of data. + Does not include time spent in the queue (due to the ``max_clients`` option). + If redirects were followed, only includes the final request. + + * start_time: Time at which the HTTP operation started, based on `time.time` + (not the monotonic clock used by `.IOLoop.time`). May be ``None`` if the request + timed out while in the queue. 
+ + * time_info: dictionary of diagnostic timing information from the request. + Available data are subject to change, but currently uses timings + available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html, + plus ``queue``, which is the delay (if any) introduced by waiting for + a slot under `AsyncHTTPClient`'s ``max_clients`` setting. + + .. versionadded:: 5.1 + + Added the ``start_time`` attribute. + + .. versionchanged:: 5.1 + + The ``request_time`` attribute previously included time spent in the queue + for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time + is excluded in both implementations. ``request_time`` is now more accurate for + ``curl_httpclient`` because it uses a monotonic clock when available. + """ + def __init__(self, request, code, headers=None, buffer=None, + effective_url=None, error=None, request_time=None, + time_info=None, reason=None, start_time=None): + if isinstance(request, _RequestProxy): + self.request = request.request + else: + self.request = request + self.code = code + self.reason = reason or httputil.responses.get(code, "Unknown") + if headers is not None: + self.headers = headers + else: + self.headers = httputil.HTTPHeaders() + self.buffer = buffer + self._body = None + if effective_url is None: + self.effective_url = request.url + else: + self.effective_url = effective_url + self._error_is_response_code = False + if error is None: + if self.code < 200 or self.code >= 300: + self._error_is_response_code = True + self.error = HTTPError(self.code, message=self.reason, + response=self) + else: + self.error = None + else: + self.error = error + self.start_time = start_time + self.request_time = request_time + self.time_info = time_info or {} + + @property + def body(self): + if self.buffer is None: + return None + elif self._body is None: + self._body = self.buffer.getvalue() + + return self._body + + def rethrow(self): + """If there was an error on the request, raise an `HTTPError`.""" + if self.error: + raise self.error + + def __repr__(self): + args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items())) + return "%s(%s)" % (self.__class__.__name__, args) + + +class HTTPClientError(Exception): + """Exception thrown for an unsuccessful HTTP request. + + Attributes: + + * ``code`` - HTTP error integer error code, e.g. 404. Error code 599 is + used when no HTTP response was received, e.g. for a timeout. + + * ``response`` - `HTTPResponse` object, if any. + + Note that if ``follow_redirects`` is False, redirects become HTTPErrors, + and you can look at ``error.response.headers['Location']`` to see the + destination of the redirect. + + .. versionchanged:: 5.1 + + Renamed from ``HTTPError`` to ``HTTPClientError`` to avoid collisions with + `tornado.web.HTTPError`. The name ``tornado.httpclient.HTTPError`` remains + as an alias. + """ + def __init__(self, code, message=None, response=None): + self.code = code + self.message = message or httputil.responses.get(code, "Unknown") + self.response = response + super(HTTPClientError, self).__init__(code, message, response) + + def __str__(self): + return "HTTP %d: %s" % (self.code, self.message) + + # There is a cyclic reference between self and self.response, + # which breaks the default __repr__ implementation. + # (especially on pypy, which doesn't have the same recursion + # detection as cpython). + __repr__ = __str__ + + +HTTPError = HTTPClientError + + +class _RequestProxy(object): + """Combines an object with a dictionary of defaults. 
+ + Used internally by AsyncHTTPClient implementations. + """ + def __init__(self, request, defaults): + self.request = request + self.defaults = defaults + + def __getattr__(self, name): + request_attr = getattr(self.request, name) + if request_attr is not None: + return request_attr + elif self.defaults is not None: + return self.defaults.get(name, None) + else: + return None + + +def main(): + from tornado.options import define, options, parse_command_line + define("print_headers", type=bool, default=False) + define("print_body", type=bool, default=True) + define("follow_redirects", type=bool, default=True) + define("validate_cert", type=bool, default=True) + define("proxy_host", type=str) + define("proxy_port", type=int) + args = parse_command_line() + client = HTTPClient() + for arg in args: + try: + response = client.fetch(arg, + follow_redirects=options.follow_redirects, + validate_cert=options.validate_cert, + proxy_host=options.proxy_host, + proxy_port=options.proxy_port, + ) + except HTTPError as e: + if e.response is not None: + response = e.response + else: + raise + if options.print_headers: + print(response.headers) + if options.print_body: + print(native_str(response.body)) + client.close() + + +if __name__ == "__main__": + main() diff --git a/server/www/packages/packages-windows/x86/tornado/httpserver.py b/server/www/packages/packages-windows/x86/tornado/httpserver.py new file mode 100644 index 0000000..3498d71 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/httpserver.py @@ -0,0 +1,330 @@ +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""A non-blocking, single-threaded HTTP server. + +Typical applications have little direct interaction with the `HTTPServer` +class except to start a server at the beginning of the process +(and even that is often done indirectly via `tornado.web.Application.listen`). + +.. versionchanged:: 4.0 + + The ``HTTPRequest`` class that used to live in this module has been moved + to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias. +""" + +from __future__ import absolute_import, division, print_function + +import socket + +from tornado.escape import native_str +from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters +from tornado import gen +from tornado import httputil +from tornado import iostream +from tornado import netutil +from tornado.tcpserver import TCPServer +from tornado.util import Configurable + + +class HTTPServer(TCPServer, Configurable, + httputil.HTTPServerConnectionDelegate): + r"""A non-blocking, single-threaded HTTP server. + + A server is defined by a subclass of `.HTTPServerConnectionDelegate`, + or, for backwards compatibility, a callback that takes an + `.HTTPServerRequest` as an argument. The delegate is usually a + `tornado.web.Application`. + + `HTTPServer` supports keep-alive connections by default + (automatically for HTTP/1.1, or for HTTP/1.0 when the client + requests ``Connection: keep-alive``). 
+ + If ``xheaders`` is ``True``, we support the + ``X-Real-Ip``/``X-Forwarded-For`` and + ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the + remote IP and URI scheme/protocol for all requests. These headers + are useful when running Tornado behind a reverse proxy or load + balancer. The ``protocol`` argument can also be set to ``https`` + if Tornado is run behind an SSL-decoding proxy that does not set one of + the supported ``xheaders``. + + By default, when parsing the ``X-Forwarded-For`` header, Tornado will + select the last (i.e., the closest) address on the list of hosts as the + remote host IP address. To select the next server in the chain, a list of + trusted downstream hosts may be passed as the ``trusted_downstream`` + argument. These hosts will be skipped when parsing the ``X-Forwarded-For`` + header. + + To make this server serve SSL traffic, send the ``ssl_options`` keyword + argument with an `ssl.SSLContext` object. For compatibility with older + versions of Python ``ssl_options`` may also be a dictionary of keyword + arguments for the `ssl.wrap_socket` method.:: + + ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) + ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"), + os.path.join(data_dir, "mydomain.key")) + HTTPServer(application, ssl_options=ssl_ctx) + + `HTTPServer` initialization follows one of three patterns (the + initialization methods are defined on `tornado.tcpserver.TCPServer`): + + 1. `~tornado.tcpserver.TCPServer.listen`: simple single-process:: + + server = HTTPServer(app) + server.listen(8888) + IOLoop.current().start() + + In many cases, `tornado.web.Application.listen` can be used to avoid + the need to explicitly create the `HTTPServer`. + + 2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`: + simple multi-process:: + + server = HTTPServer(app) + server.bind(8888) + server.start(0) # Forks multiple sub-processes + IOLoop.current().start() + + When using this interface, an `.IOLoop` must *not* be passed + to the `HTTPServer` constructor. `~.TCPServer.start` will always start + the server on the default singleton `.IOLoop`. + + 3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process:: + + sockets = tornado.netutil.bind_sockets(8888) + tornado.process.fork_processes(0) + server = HTTPServer(app) + server.add_sockets(sockets) + IOLoop.current().start() + + The `~.TCPServer.add_sockets` interface is more complicated, + but it can be used with `tornado.process.fork_processes` to + give you more flexibility in when the fork happens. + `~.TCPServer.add_sockets` can also be used in single-process + servers if you want to create your listening sockets in some + way other than `tornado.netutil.bind_sockets`. + + .. versionchanged:: 4.0 + Added ``decompress_request``, ``chunk_size``, ``max_header_size``, + ``idle_connection_timeout``, ``body_timeout``, ``max_body_size`` + arguments. Added support for `.HTTPServerConnectionDelegate` + instances as ``request_callback``. + + .. versionchanged:: 4.1 + `.HTTPServerConnectionDelegate.start_request` is now called with + two arguments ``(server_conn, request_conn)`` (in accordance with the + documentation) instead of one ``(request_conn)``. + + .. versionchanged:: 4.2 + `HTTPServer` is now a subclass of `tornado.util.Configurable`. + + .. versionchanged:: 4.5 + Added the ``trusted_downstream`` argument. + + .. versionchanged:: 5.0 + The ``io_loop`` argument has been removed. 
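+
+ An illustrative ``xheaders`` setup behind a reverse proxy (editorial
+ sketch; the downstream address is hypothetical)::
+
+ server = HTTPServer(app, xheaders=True,
+ trusted_downstream=["10.0.0.1"])
+ server.listen(8888)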
+ """ + def __init__(self, *args, **kwargs): + # Ignore args to __init__; real initialization belongs in + # initialize since we're Configurable. (there's something + # weird in initialization order between this class, + # Configurable, and TCPServer so we can't leave __init__ out + # completely) + pass + + def initialize(self, request_callback, no_keep_alive=False, + xheaders=False, ssl_options=None, protocol=None, + decompress_request=False, + chunk_size=None, max_header_size=None, + idle_connection_timeout=None, body_timeout=None, + max_body_size=None, max_buffer_size=None, + trusted_downstream=None): + self.request_callback = request_callback + self.xheaders = xheaders + self.protocol = protocol + self.conn_params = HTTP1ConnectionParameters( + decompress=decompress_request, + chunk_size=chunk_size, + max_header_size=max_header_size, + header_timeout=idle_connection_timeout or 3600, + max_body_size=max_body_size, + body_timeout=body_timeout, + no_keep_alive=no_keep_alive) + TCPServer.__init__(self, ssl_options=ssl_options, + max_buffer_size=max_buffer_size, + read_chunk_size=chunk_size) + self._connections = set() + self.trusted_downstream = trusted_downstream + + @classmethod + def configurable_base(cls): + return HTTPServer + + @classmethod + def configurable_default(cls): + return HTTPServer + + @gen.coroutine + def close_all_connections(self): + while self._connections: + # Peek at an arbitrary element of the set + conn = next(iter(self._connections)) + yield conn.close() + + def handle_stream(self, stream, address): + context = _HTTPRequestContext(stream, address, + self.protocol, + self.trusted_downstream) + conn = HTTP1ServerConnection( + stream, self.conn_params, context) + self._connections.add(conn) + conn.start_serving(self) + + def start_request(self, server_conn, request_conn): + if isinstance(self.request_callback, httputil.HTTPServerConnectionDelegate): + delegate = self.request_callback.start_request(server_conn, request_conn) + else: + delegate = _CallableAdapter(self.request_callback, request_conn) + + if self.xheaders: + delegate = _ProxyAdapter(delegate, request_conn) + + return delegate + + def on_close(self, server_conn): + self._connections.remove(server_conn) + + +class _CallableAdapter(httputil.HTTPMessageDelegate): + def __init__(self, request_callback, request_conn): + self.connection = request_conn + self.request_callback = request_callback + self.request = None + self.delegate = None + self._chunks = [] + + def headers_received(self, start_line, headers): + self.request = httputil.HTTPServerRequest( + connection=self.connection, start_line=start_line, + headers=headers) + + def data_received(self, chunk): + self._chunks.append(chunk) + + def finish(self): + self.request.body = b''.join(self._chunks) + self.request._parse_body() + self.request_callback(self.request) + + def on_connection_close(self): + self._chunks = None + + +class _HTTPRequestContext(object): + def __init__(self, stream, address, protocol, trusted_downstream=None): + self.address = address + # Save the socket's address family now so we know how to + # interpret self.address even after the stream is closed + # and its socket attribute replaced with None. + if stream.socket is not None: + self.address_family = stream.socket.family + else: + self.address_family = None + # In HTTPServerRequest we want an IP, not a full socket address. 
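+        # (For AF_INET/AF_INET6 the address is a (host, port, ...) tuple,
+        # so element 0 is the IP; a Unix-domain socket address is a
+        # filesystem path, hence the placeholder below.)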
+        if (self.address_family in (socket.AF_INET, socket.AF_INET6) and
+                address is not None):
+            self.remote_ip = address[0]
+        else:
+            # Unix (or other) socket; fake the remote address.
+            self.remote_ip = '0.0.0.0'
+        if protocol:
+            self.protocol = protocol
+        elif isinstance(stream, iostream.SSLIOStream):
+            self.protocol = "https"
+        else:
+            self.protocol = "http"
+        self._orig_remote_ip = self.remote_ip
+        self._orig_protocol = self.protocol
+        self.trusted_downstream = set(trusted_downstream or [])
+
+    def __str__(self):
+        if self.address_family in (socket.AF_INET, socket.AF_INET6):
+            return self.remote_ip
+        elif isinstance(self.address, bytes):
+            # Python 3 with the -bb option warns about str(bytes),
+            # so convert it explicitly.
+            # Unix socket addresses are str on mac but bytes on linux.
+            return native_str(self.address)
+        else:
+            return str(self.address)
+
+    def _apply_xheaders(self, headers):
+        """Rewrite the ``remote_ip`` and ``protocol`` fields."""
+        # Squid uses X-Forwarded-For, others use X-Real-Ip
+        ip = headers.get("X-Forwarded-For", self.remote_ip)
+        # Skip trusted downstream hosts in X-Forwarded-For list
+        for ip in (cand.strip() for cand in reversed(ip.split(','))):
+            if ip not in self.trusted_downstream:
+                break
+        ip = headers.get("X-Real-Ip", ip)
+        if netutil.is_valid_ip(ip):
+            self.remote_ip = ip
+        # AWS uses X-Forwarded-Proto
+        proto_header = headers.get(
+            "X-Scheme", headers.get("X-Forwarded-Proto",
+                                    self.protocol))
+        if proto_header:
+            # use only the last proto entry if there is more than one
+            # TODO: support trusting multiple layers of proxied protocol
+            proto_header = proto_header.split(',')[-1].strip()
+        if proto_header in ("http", "https"):
+            self.protocol = proto_header
+
+    def _unapply_xheaders(self):
+        """Undo changes from `_apply_xheaders`.
+
+        Xheaders are per-request so they should not leak to the next
+        request on the same connection.
+        """
+        self.remote_ip = self._orig_remote_ip
+        self.protocol = self._orig_protocol
+
+
+class _ProxyAdapter(httputil.HTTPMessageDelegate):
+    def __init__(self, delegate, request_conn):
+        self.connection = request_conn
+        self.delegate = delegate
+
+    def headers_received(self, start_line, headers):
+        self.connection.context._apply_xheaders(headers)
+        return self.delegate.headers_received(start_line, headers)
+
+    def data_received(self, chunk):
+        return self.delegate.data_received(chunk)
+
+    def finish(self):
+        self.delegate.finish()
+        self._cleanup()
+
+    def on_connection_close(self):
+        self.delegate.on_connection_close()
+        self._cleanup()
+
+    def _cleanup(self):
+        self.connection.context._unapply_xheaders()
+
+
+HTTPRequest = httputil.HTTPServerRequest
diff --git a/server/www/packages/packages-windows/x86/tornado/httputil.py b/server/www/packages/packages-windows/x86/tornado/httputil.py
new file mode 100644
index 0000000..3961446
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/tornado/httputil.py
@@ -0,0 +1,1095 @@
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +"""HTTP utility code shared by clients and servers. + +This module also defines the `HTTPServerRequest` class which is exposed +via `tornado.web.RequestHandler.request`. +""" + +from __future__ import absolute_import, division, print_function + +import calendar +import collections +import copy +import datetime +import email.utils +import numbers +import re +import time +import unicodedata +import warnings + +from tornado.escape import native_str, parse_qs_bytes, utf8 +from tornado.log import gen_log +from tornado.util import ObjectDict, PY3, unicode_type + +if PY3: + import http.cookies as Cookie + from http.client import responses + from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl +else: + import Cookie + from httplib import responses + from urllib import urlencode + from urlparse import urlparse, urlunparse, parse_qsl + + +# responses is unused in this file, but we re-export it to other files. +# Reference it so pyflakes doesn't complain. +responses + +try: + from ssl import SSLError +except ImportError: + # ssl is unavailable on app engine. + class _SSLError(Exception): + pass + # Hack around a mypy limitation. We can't simply put "type: ignore" + # on the class definition itself; must go through an assignment. + SSLError = _SSLError # type: ignore + +try: + import typing # noqa: F401 +except ImportError: + pass + + +# RFC 7230 section 3.5: a recipient MAY recognize a single LF as a line +# terminator and ignore any preceding CR. +_CRLF_RE = re.compile(r'\r?\n') + + +class _NormalizedHeaderCache(dict): + """Dynamic cached mapping of header names to Http-Header-Case. + + Implemented as a dict subclass so that cache hits are as fast as a + normal dict lookup, without the overhead of a python function + call. + + >>> normalized_headers = _NormalizedHeaderCache(10) + >>> normalized_headers["coNtent-TYPE"] + 'Content-Type' + """ + def __init__(self, size): + super(_NormalizedHeaderCache, self).__init__() + self.size = size + self.queue = collections.deque() + + def __missing__(self, key): + normalized = "-".join([w.capitalize() for w in key.split("-")]) + self[key] = normalized + self.queue.append(key) + if len(self.queue) > self.size: + # Limit the size of the cache. LRU would be better, but this + # simpler approach should be fine. In Python 2.7+ we could + # use OrderedDict (or in 3.2+, @functools.lru_cache). + old_key = self.queue.popleft() + del self[old_key] + return normalized + + +_normalized_headers = _NormalizedHeaderCache(1000) + + +class HTTPHeaders(collections.MutableMapping): + """A dictionary that maintains ``Http-Header-Case`` for all keys. + + Supports multiple values per key via a pair of new methods, + `add()` and `get_list()`. The regular dictionary interface + returns a single value per key, with multiple values joined by a + comma. + + >>> h = HTTPHeaders({"content-type": "text/html"}) + >>> list(h.keys()) + ['Content-Type'] + >>> h["Content-Type"] + 'text/html' + + >>> h.add("Set-Cookie", "A=B") + >>> h.add("Set-Cookie", "C=D") + >>> h["set-cookie"] + 'A=B,C=D' + >>> h.get_list("set-cookie") + ['A=B', 'C=D'] + + >>> for (k,v) in sorted(h.get_all()): + ... print('%s: %s' % (k,v)) + ... 
+ Content-Type: text/html + Set-Cookie: A=B + Set-Cookie: C=D + """ + def __init__(self, *args, **kwargs): + self._dict = {} # type: typing.Dict[str, str] + self._as_list = {} # type: typing.Dict[str, typing.List[str]] + self._last_key = None + if (len(args) == 1 and len(kwargs) == 0 and + isinstance(args[0], HTTPHeaders)): + # Copy constructor + for k, v in args[0].get_all(): + self.add(k, v) + else: + # Dict-style initialization + self.update(*args, **kwargs) + + # new public methods + + def add(self, name, value): + # type: (str, str) -> None + """Adds a new value for the given key.""" + norm_name = _normalized_headers[name] + self._last_key = norm_name + if norm_name in self: + self._dict[norm_name] = (native_str(self[norm_name]) + ',' + + native_str(value)) + self._as_list[norm_name].append(value) + else: + self[norm_name] = value + + def get_list(self, name): + """Returns all values for the given header as a list.""" + norm_name = _normalized_headers[name] + return self._as_list.get(norm_name, []) + + def get_all(self): + # type: () -> typing.Iterable[typing.Tuple[str, str]] + """Returns an iterable of all (name, value) pairs. + + If a header has multiple values, multiple pairs will be + returned with the same name. + """ + for name, values in self._as_list.items(): + for value in values: + yield (name, value) + + def parse_line(self, line): + """Updates the dictionary with a single header line. + + >>> h = HTTPHeaders() + >>> h.parse_line("Content-Type: text/html") + >>> h.get('content-type') + 'text/html' + """ + if line[0].isspace(): + # continuation of a multi-line header + if self._last_key is None: + raise HTTPInputError("first header line cannot start with whitespace") + new_part = ' ' + line.lstrip() + self._as_list[self._last_key][-1] += new_part + self._dict[self._last_key] += new_part + else: + try: + name, value = line.split(":", 1) + except ValueError: + raise HTTPInputError("no colon in header line") + self.add(name, value.strip()) + + @classmethod + def parse(cls, headers): + """Returns a dictionary from HTTP header text. + + >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n") + >>> sorted(h.items()) + [('Content-Length', '42'), ('Content-Type', 'text/html')] + + .. versionchanged:: 5.1 + + Raises `HTTPInputError` on malformed headers instead of a + mix of `KeyError`, and `ValueError`. + + """ + h = cls() + for line in _CRLF_RE.split(headers): + if line: + h.parse_line(line) + return h + + # MutableMapping abstract method implementations. + + def __setitem__(self, name, value): + norm_name = _normalized_headers[name] + self._dict[norm_name] = value + self._as_list[norm_name] = [value] + + def __getitem__(self, name): + # type: (str) -> str + return self._dict[_normalized_headers[name]] + + def __delitem__(self, name): + norm_name = _normalized_headers[name] + del self._dict[norm_name] + del self._as_list[norm_name] + + def __len__(self): + return len(self._dict) + + def __iter__(self): + return iter(self._dict) + + def copy(self): + # defined in dict but not in MutableMapping. + return HTTPHeaders(self) + + # Use our overridden copy method for the copy.copy module. + # This makes shallow copies one level deeper, but preserves + # the appearance that HTTPHeaders is a single container. + __copy__ = copy + + def __str__(self): + lines = [] + for name, value in self.get_all(): + lines.append("%s: %s\n" % (name, value)) + return "".join(lines) + + __unicode__ = __str__ + + +class HTTPServerRequest(object): + """A single HTTP request. 
+
+    All attributes are type `str` unless otherwise noted.
+
+    .. attribute:: method
+
+       HTTP request method, e.g. "GET" or "POST"
+
+    .. attribute:: uri
+
+       The requested uri.
+
+    .. attribute:: path
+
+       The path portion of `uri`
+
+    .. attribute:: query
+
+       The query portion of `uri`
+
+    .. attribute:: version
+
+       HTTP version specified in request, e.g. "HTTP/1.1"
+
+    .. attribute:: headers
+
+       `.HTTPHeaders` dictionary-like object for request headers. Acts like
+       a case-insensitive dictionary with additional methods for repeated
+       headers.
+
+    .. attribute:: body
+
+       Request body, if present, as a byte string.
+
+    .. attribute:: remote_ip
+
+       Client's IP address as a string. If ``HTTPServer.xheaders`` is set,
+       will pass along the real IP address provided by a load balancer
+       in the ``X-Real-Ip`` or ``X-Forwarded-For`` header.
+
+       .. versionchanged:: 3.1
+          The list format of ``X-Forwarded-For`` is now supported.
+
+    .. attribute:: protocol
+
+       The protocol used, either "http" or "https". If ``HTTPServer.xheaders``
+       is set, will pass along the protocol used by a load balancer if
+       reported via an ``X-Scheme`` header.
+
+    .. attribute:: host
+
+       The requested hostname, usually taken from the ``Host`` header.
+
+    .. attribute:: arguments
+
+       GET/POST arguments are available in the arguments property, which
+       maps argument names to lists of values (to support multiple values
+       for individual names). Names are of type `str`, while arguments
+       are byte strings. Note that this is different from
+       `.RequestHandler.get_argument`, which returns argument values as
+       unicode strings.
+
+    .. attribute:: query_arguments
+
+       Same format as ``arguments``, but contains only arguments extracted
+       from the query string.
+
+       .. versionadded:: 3.2
+
+    .. attribute:: body_arguments
+
+       Same format as ``arguments``, but contains only arguments extracted
+       from the request body.
+
+       .. versionadded:: 3.2
+
+    .. attribute:: files
+
+       File uploads are available in the files property, which maps file
+       names to lists of `.HTTPFile`.
+
+    .. attribute:: connection
+
+       An HTTP request is attached to a single HTTP connection, which can
+       be accessed through the "connection" attribute. Since connections
+       are typically kept open in HTTP/1.1, multiple requests can be handled
+       sequentially on a single connection.
+
+    .. versionchanged:: 4.0
+       Moved from ``tornado.httpserver.HTTPRequest``.
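+
+    As a minimal illustration (all values shown are placeholders), a
+    directly constructed request exposes the parsed pieces of the URI::
+
+        req = HTTPServerRequest(method="GET", uri="/path?x=1")
+        req.path       # "/path"
+        req.query      # "x=1"
+        req.arguments  # {"x": [b"1"]}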
+ """ + def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None, + body=None, host=None, files=None, connection=None, + start_line=None, server_connection=None): + if start_line is not None: + method, uri, version = start_line + self.method = method + self.uri = uri + self.version = version + self.headers = headers or HTTPHeaders() + self.body = body or b"" + + # set remote IP and protocol + context = getattr(connection, 'context', None) + self.remote_ip = getattr(context, 'remote_ip', None) + self.protocol = getattr(context, 'protocol', "http") + + self.host = host or self.headers.get("Host") or "127.0.0.1" + self.host_name = split_host_and_port(self.host.lower())[0] + self.files = files or {} + self.connection = connection + self.server_connection = server_connection + self._start_time = time.time() + self._finish_time = None + + self.path, sep, self.query = uri.partition('?') + self.arguments = parse_qs_bytes(self.query, keep_blank_values=True) + self.query_arguments = copy.deepcopy(self.arguments) + self.body_arguments = {} + + def supports_http_1_1(self): + """Returns True if this request supports HTTP/1.1 semantics. + + .. deprecated:: 4.0 + + Applications are less likely to need this information with + the introduction of `.HTTPConnection`. If you still need + it, access the ``version`` attribute directly. This method + will be removed in Tornado 6.0. + + """ + warnings.warn("supports_http_1_1() is deprecated, use request.version instead", + DeprecationWarning) + return self.version == "HTTP/1.1" + + @property + def cookies(self): + """A dictionary of Cookie.Morsel objects.""" + if not hasattr(self, "_cookies"): + self._cookies = Cookie.SimpleCookie() + if "Cookie" in self.headers: + try: + parsed = parse_cookie(self.headers["Cookie"]) + except Exception: + pass + else: + for k, v in parsed.items(): + try: + self._cookies[k] = v + except Exception: + # SimpleCookie imposes some restrictions on keys; + # parse_cookie does not. Discard any cookies + # with disallowed keys. + pass + return self._cookies + + def write(self, chunk, callback=None): + """Writes the given chunk to the response stream. + + .. deprecated:: 4.0 + Use ``request.connection`` and the `.HTTPConnection` methods + to write the response. This method will be removed in Tornado 6.0. + """ + warnings.warn("req.write deprecated, use req.connection.write and write_headers instead", + DeprecationWarning) + assert isinstance(chunk, bytes) + assert self.version.startswith("HTTP/1."), \ + "deprecated interface only supported in HTTP/1.x" + self.connection.write(chunk, callback=callback) + + def finish(self): + """Finishes this HTTP request on the open connection. + + .. deprecated:: 4.0 + Use ``request.connection`` and the `.HTTPConnection` methods + to write the response. This method will be removed in Tornado 6.0. + """ + warnings.warn("req.finish deprecated, use req.connection.finish instead", + DeprecationWarning) + self.connection.finish() + self._finish_time = time.time() + + def full_url(self): + """Reconstructs the full URL for this request.""" + return self.protocol + "://" + self.host + self.uri + + def request_time(self): + """Returns the amount of time it took for this request to execute.""" + if self._finish_time is None: + return time.time() - self._start_time + else: + return self._finish_time - self._start_time + + def get_ssl_certificate(self, binary_form=False): + """Returns the client's SSL certificate, if any. 
+ + To use client certificates, the HTTPServer's + `ssl.SSLContext.verify_mode` field must be set, e.g.:: + + ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) + ssl_ctx.load_cert_chain("foo.crt", "foo.key") + ssl_ctx.load_verify_locations("cacerts.pem") + ssl_ctx.verify_mode = ssl.CERT_REQUIRED + server = HTTPServer(app, ssl_options=ssl_ctx) + + By default, the return value is a dictionary (or None, if no + client certificate is present). If ``binary_form`` is true, a + DER-encoded form of the certificate is returned instead. See + SSLSocket.getpeercert() in the standard library for more + details. + http://docs.python.org/library/ssl.html#sslsocket-objects + """ + try: + return self.connection.stream.socket.getpeercert( + binary_form=binary_form) + except SSLError: + return None + + def _parse_body(self): + parse_body_arguments( + self.headers.get("Content-Type", ""), self.body, + self.body_arguments, self.files, + self.headers) + + for k, v in self.body_arguments.items(): + self.arguments.setdefault(k, []).extend(v) + + def __repr__(self): + attrs = ("protocol", "host", "method", "uri", "version", "remote_ip") + args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs]) + return "%s(%s)" % (self.__class__.__name__, args) + + +class HTTPInputError(Exception): + """Exception class for malformed HTTP requests or responses + from remote sources. + + .. versionadded:: 4.0 + """ + pass + + +class HTTPOutputError(Exception): + """Exception class for errors in HTTP output. + + .. versionadded:: 4.0 + """ + pass + + +class HTTPServerConnectionDelegate(object): + """Implement this interface to handle requests from `.HTTPServer`. + + .. versionadded:: 4.0 + """ + def start_request(self, server_conn, request_conn): + """This method is called by the server when a new request has started. + + :arg server_conn: is an opaque object representing the long-lived + (e.g. tcp-level) connection. + :arg request_conn: is a `.HTTPConnection` object for a single + request/response exchange. + + This method should return a `.HTTPMessageDelegate`. + """ + raise NotImplementedError() + + def on_close(self, server_conn): + """This method is called when a connection has been closed. + + :arg server_conn: is a server connection that has previously been + passed to ``start_request``. + """ + pass + + +class HTTPMessageDelegate(object): + """Implement this interface to handle an HTTP request or response. + + .. versionadded:: 4.0 + """ + def headers_received(self, start_line, headers): + """Called when the HTTP headers have been received and parsed. + + :arg start_line: a `.RequestStartLine` or `.ResponseStartLine` + depending on whether this is a client or server message. + :arg headers: a `.HTTPHeaders` instance. + + Some `.HTTPConnection` methods can only be called during + ``headers_received``. + + May return a `.Future`; if it does the body will not be read + until it is done. + """ + pass + + def data_received(self, chunk): + """Called when a chunk of data has been received. + + May return a `.Future` for flow control. + """ + pass + + def finish(self): + """Called after the last chunk of data has been received.""" + pass + + def on_connection_close(self): + """Called if the connection is closed without finishing the request. + + If ``headers_received`` is called, either ``finish`` or + ``on_connection_close`` will be called, but not both. + """ + pass + + +class HTTPConnection(object): + """Applications use this interface to write their responses. + + .. 
versionadded:: 4.0
+    """
+    def write_headers(self, start_line, headers, chunk=None, callback=None):
+        """Write an HTTP header block.
+
+        :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`.
+        :arg headers: a `.HTTPHeaders` instance.
+        :arg chunk: the first (optional) chunk of data. This is an optimization
+            so that small responses can be written in the same call as their
+            headers.
+        :arg callback: a callback to be run when the write is complete.
+
+        The ``version`` field of ``start_line`` is ignored.
+
+        Returns a `.Future` if no callback is given.
+
+        .. deprecated:: 5.1
+
+           The ``callback`` argument is deprecated and will be removed
+           in Tornado 6.0.
+        """
+        raise NotImplementedError()
+
+    def write(self, chunk, callback=None):
+        """Writes a chunk of body data.
+
+        The callback will be run when the write is complete. If no callback
+        is given, returns a Future.
+
+        .. deprecated:: 5.1
+
+           The ``callback`` argument is deprecated and will be removed
+           in Tornado 6.0.
+        """
+        raise NotImplementedError()
+
+    def finish(self):
+        """Indicates that the last body data has been written.
+        """
+        raise NotImplementedError()
+
+
+def url_concat(url, args):
+    """Concatenate url and arguments regardless of whether
+    url has existing query parameters.
+
+    ``args`` may be either a dictionary or a list of key-value pairs
+    (the latter allows for multiple values with the same key).
+
+    >>> url_concat("http://example.com/foo", dict(c="d"))
+    'http://example.com/foo?c=d'
+    >>> url_concat("http://example.com/foo?a=b", dict(c="d"))
+    'http://example.com/foo?a=b&c=d'
+    >>> url_concat("http://example.com/foo?a=b", [("c", "d"), ("c", "d2")])
+    'http://example.com/foo?a=b&c=d&c=d2'
+    """
+    if args is None:
+        return url
+    parsed_url = urlparse(url)
+    if isinstance(args, dict):
+        parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
+        parsed_query.extend(args.items())
+    elif isinstance(args, list) or isinstance(args, tuple):
+        parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
+        parsed_query.extend(args)
+    else:
+        err = "'args' parameter should be dict, list or tuple. Not {0}".format(
+            type(args))
+        raise TypeError(err)
+    final_query = urlencode(parsed_query)
+    url = urlunparse((
+        parsed_url[0],
+        parsed_url[1],
+        parsed_url[2],
+        parsed_url[3],
+        final_query,
+        parsed_url[5]))
+    return url
+
+
+class HTTPFile(ObjectDict):
+    """Represents a file uploaded via a form.
+
+    For backwards compatibility, its instance attributes are also
+    accessible as dictionary keys.
+
+    * ``filename``
+    * ``body``
+    * ``content_type``
+    """
+    pass
+
+
+def _parse_request_range(range_header):
+    """Parses a Range header.
+
+    Returns either ``None`` or tuple ``(start, end)``.
+    Note that while the HTTP headers use inclusive byte positions,
+    this method returns indexes suitable for use in slices.
+
+    >>> start, end = _parse_request_range("bytes=1-2")
+    >>> start, end
+    (1, 3)
+    >>> [0, 1, 2, 3, 4][start:end]
+    [1, 2]
+    >>> _parse_request_range("bytes=6-")
+    (6, None)
+    >>> _parse_request_range("bytes=-6")
+    (-6, None)
+    >>> _parse_request_range("bytes=-0")
+    (None, 0)
+    >>> _parse_request_range("bytes=")
+    (None, None)
+    >>> _parse_request_range("foo=42")
+    >>> _parse_request_range("bytes=1-2,6-10")
+
+    Note: only supports one range (ex, ``bytes=1-2,6-10`` is not allowed).
+
+    See [0] for the details of the range header.
+ + [0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges + """ + unit, _, value = range_header.partition("=") + unit, value = unit.strip(), value.strip() + if unit != "bytes": + return None + start_b, _, end_b = value.partition("-") + try: + start = _int_or_none(start_b) + end = _int_or_none(end_b) + except ValueError: + return None + if end is not None: + if start is None: + if end != 0: + start = -end + end = None + else: + end += 1 + return (start, end) + + +def _get_content_range(start, end, total): + """Returns a suitable Content-Range header: + + >>> print(_get_content_range(None, 1, 4)) + bytes 0-0/4 + >>> print(_get_content_range(1, 3, 4)) + bytes 1-2/4 + >>> print(_get_content_range(None, None, 4)) + bytes 0-3/4 + """ + start = start or 0 + end = (end or total) - 1 + return "bytes %s-%s/%s" % (start, end, total) + + +def _int_or_none(val): + val = val.strip() + if val == "": + return None + return int(val) + + +def parse_body_arguments(content_type, body, arguments, files, headers=None): + """Parses a form request body. + + Supports ``application/x-www-form-urlencoded`` and + ``multipart/form-data``. The ``content_type`` parameter should be + a string and ``body`` should be a byte string. The ``arguments`` + and ``files`` parameters are dictionaries that will be updated + with the parsed contents. + """ + if headers and 'Content-Encoding' in headers: + gen_log.warning("Unsupported Content-Encoding: %s", + headers['Content-Encoding']) + return + if content_type.startswith("application/x-www-form-urlencoded"): + try: + uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True) + except Exception as e: + gen_log.warning('Invalid x-www-form-urlencoded body: %s', e) + uri_arguments = {} + for name, values in uri_arguments.items(): + if values: + arguments.setdefault(name, []).extend(values) + elif content_type.startswith("multipart/form-data"): + try: + fields = content_type.split(";") + for field in fields: + k, sep, v = field.strip().partition("=") + if k == "boundary" and v: + parse_multipart_form_data(utf8(v), body, arguments, files) + break + else: + raise ValueError("multipart boundary not found") + except Exception as e: + gen_log.warning("Invalid multipart/form-data: %s", e) + + +def parse_multipart_form_data(boundary, data, arguments, files): + """Parses a ``multipart/form-data`` body. + + The ``boundary`` and ``data`` parameters are both byte strings. + The dictionaries given in the arguments and files parameters + will be updated with the contents of the body. + + .. versionchanged:: 5.1 + + Now recognizes non-ASCII filenames in RFC 2231/5987 + (``filename*=``) format. + """ + # The standard allows for the boundary to be quoted in the header, + # although it's rare (it happens at least for google app engine + # xmpp). I think we're also supposed to handle backslash-escapes + # here but I'll save that until we see a client that uses them + # in the wild. 
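+    # Schematically, the body being parsed looks like:
+    #   --boundary\r\n
+    #   Content-Disposition: form-data; name="field"\r\n
+    #   \r\n
+    #   field value\r\n
+    #   --boundary--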
+ if boundary.startswith(b'"') and boundary.endswith(b'"'): + boundary = boundary[1:-1] + final_boundary_index = data.rfind(b"--" + boundary + b"--") + if final_boundary_index == -1: + gen_log.warning("Invalid multipart/form-data: no final boundary") + return + parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n") + for part in parts: + if not part: + continue + eoh = part.find(b"\r\n\r\n") + if eoh == -1: + gen_log.warning("multipart/form-data missing headers") + continue + headers = HTTPHeaders.parse(part[:eoh].decode("utf-8")) + disp_header = headers.get("Content-Disposition", "") + disposition, disp_params = _parse_header(disp_header) + if disposition != "form-data" or not part.endswith(b"\r\n"): + gen_log.warning("Invalid multipart/form-data") + continue + value = part[eoh + 4:-2] + if not disp_params.get("name"): + gen_log.warning("multipart/form-data value missing name") + continue + name = disp_params["name"] + if disp_params.get("filename"): + ctype = headers.get("Content-Type", "application/unknown") + files.setdefault(name, []).append(HTTPFile( # type: ignore + filename=disp_params["filename"], body=value, + content_type=ctype)) + else: + arguments.setdefault(name, []).append(value) + + +def format_timestamp(ts): + """Formats a timestamp in the format used by HTTP. + + The argument may be a numeric timestamp as returned by `time.time`, + a time tuple as returned by `time.gmtime`, or a `datetime.datetime` + object. + + >>> format_timestamp(1359312200) + 'Sun, 27 Jan 2013 18:43:20 GMT' + """ + if isinstance(ts, numbers.Real): + pass + elif isinstance(ts, (tuple, time.struct_time)): + ts = calendar.timegm(ts) + elif isinstance(ts, datetime.datetime): + ts = calendar.timegm(ts.utctimetuple()) + else: + raise TypeError("unknown timestamp type: %r" % ts) + return email.utils.formatdate(ts, usegmt=True) + + +RequestStartLine = collections.namedtuple( + 'RequestStartLine', ['method', 'path', 'version']) + + +def parse_request_start_line(line): + """Returns a (method, path, version) tuple for an HTTP 1.x request line. + + The response is a `collections.namedtuple`. + + >>> parse_request_start_line("GET /foo HTTP/1.1") + RequestStartLine(method='GET', path='/foo', version='HTTP/1.1') + """ + try: + method, path, version = line.split(" ") + except ValueError: + # https://tools.ietf.org/html/rfc7230#section-3.1.1 + # invalid request-line SHOULD respond with a 400 (Bad Request) + raise HTTPInputError("Malformed HTTP request line") + if not re.match(r"^HTTP/1\.[0-9]$", version): + raise HTTPInputError( + "Malformed HTTP version in HTTP Request-Line: %r" % version) + return RequestStartLine(method, path, version) + + +ResponseStartLine = collections.namedtuple( + 'ResponseStartLine', ['version', 'code', 'reason']) + + +def parse_response_start_line(line): + """Returns a (version, code, reason) tuple for an HTTP 1.x response line. + + The response is a `collections.namedtuple`. + + >>> parse_response_start_line("HTTP/1.1 200 OK") + ResponseStartLine(version='HTTP/1.1', code=200, reason='OK') + """ + line = native_str(line) + match = re.match("(HTTP/1.[0-9]) ([0-9]+) ([^\r]*)", line) + if not match: + raise HTTPInputError("Error parsing response start line") + return ResponseStartLine(match.group(1), int(match.group(2)), + match.group(3)) + +# _parseparam and _parse_header are copied and modified from python2.7's cgi.py +# The original 2.7 version of this code did not correctly support some +# combinations of semicolons and double quotes. 
+# It has also been modified to support valueless parameters as seen in +# websocket extension negotiations, and to support non-ascii values in +# RFC 2231/5987 format. + + +def _parseparam(s): + while s[:1] == ';': + s = s[1:] + end = s.find(';') + while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2: + end = s.find(';', end + 1) + if end < 0: + end = len(s) + f = s[:end] + yield f.strip() + s = s[end:] + + +def _parse_header(line): + r"""Parse a Content-type like header. + + Return the main content-type and a dictionary of options. + + >>> d = "form-data; foo=\"b\\\\a\\\"r\"; file*=utf-8''T%C3%A4st" + >>> ct, d = _parse_header(d) + >>> ct + 'form-data' + >>> d['file'] == r'T\u00e4st'.encode('ascii').decode('unicode_escape') + True + >>> d['foo'] + 'b\\a"r' + """ + parts = _parseparam(';' + line) + key = next(parts) + # decode_params treats first argument special, but we already stripped key + params = [('Dummy', 'value')] + for p in parts: + i = p.find('=') + if i >= 0: + name = p[:i].strip().lower() + value = p[i + 1:].strip() + params.append((name, native_str(value))) + params = email.utils.decode_params(params) + params.pop(0) # get rid of the dummy again + pdict = {} + for name, value in params: + value = email.utils.collapse_rfc2231_value(value) + if len(value) >= 2 and value[0] == '"' and value[-1] == '"': + value = value[1:-1] + pdict[name] = value + return key, pdict + + +def _encode_header(key, pdict): + """Inverse of _parse_header. + + >>> _encode_header('permessage-deflate', + ... {'client_max_window_bits': 15, 'client_no_context_takeover': None}) + 'permessage-deflate; client_max_window_bits=15; client_no_context_takeover' + """ + if not pdict: + return key + out = [key] + # Sort the parameters just to make it easy to test. + for k, v in sorted(pdict.items()): + if v is None: + out.append(k) + else: + # TODO: quote if necessary. + out.append('%s=%s' % (k, v)) + return '; '.join(out) + + +def encode_username_password(username, password): + """Encodes a username/password pair in the format used by HTTP auth. + + The return value is a byte string in the form ``username:password``. + + .. versionadded:: 5.1 + """ + if isinstance(username, unicode_type): + username = unicodedata.normalize('NFC', username) + if isinstance(password, unicode_type): + password = unicodedata.normalize('NFC', password) + return utf8(username) + b":" + utf8(password) + + +def doctests(): + import doctest + return doctest.DocTestSuite() + + +def split_host_and_port(netloc): + """Returns ``(host, port)`` tuple from ``netloc``. + + Returned ``port`` will be ``None`` if not present. + + .. versionadded:: 4.1 + """ + match = re.match(r'^(.+):(\d+)$', netloc) + if match: + host = match.group(1) + port = int(match.group(2)) + else: + host = netloc + port = None + return (host, port) + + +def qs_to_qsl(qs): + """Generator converting a result of ``parse_qs`` back to name-value pairs. + + .. versionadded:: 5.0 + """ + for k, vs in qs.items(): + for v in vs: + yield (k, v) + + +_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]") +_QuotePatt = re.compile(r"[\\].") +_nulljoin = ''.join + + +def _unquote_cookie(str): + """Handle double quotes and escaping in cookie values. + + This method is copied verbatim from the Python 3.5 standard + library (http.cookies._unquote) so we don't have to depend on + non-public interfaces. + """ + # If there aren't any doublequotes, + # then there can't be any special characters. See RFC 2109. 
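+    # For illustration: '"a\"b"' becomes 'a"b' (escaped quote), and
+    # '"a\012b"' becomes 'a', newline, 'b' (octal escape).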
+ if str is None or len(str) < 2: + return str + if str[0] != '"' or str[-1] != '"': + return str + + # We have to assume that we must decode this string. + # Down to work. + + # Remove the "s + str = str[1:-1] + + # Check for special sequences. Examples: + # \012 --> \n + # \" --> " + # + i = 0 + n = len(str) + res = [] + while 0 <= i < n: + o_match = _OctalPatt.search(str, i) + q_match = _QuotePatt.search(str, i) + if not o_match and not q_match: # Neither matched + res.append(str[i:]) + break + # else: + j = k = -1 + if o_match: + j = o_match.start(0) + if q_match: + k = q_match.start(0) + if q_match and (not o_match or k < j): # QuotePatt matched + res.append(str[i:k]) + res.append(str[k + 1]) + i = k + 2 + else: # OctalPatt matched + res.append(str[i:j]) + res.append(chr(int(str[j + 1:j + 4], 8))) + i = j + 4 + return _nulljoin(res) + + +def parse_cookie(cookie): + """Parse a ``Cookie`` HTTP header into a dict of name/value pairs. + + This function attempts to mimic browser cookie parsing behavior; + it specifically does not follow any of the cookie-related RFCs + (because browsers don't either). + + The algorithm used is identical to that used by Django version 1.9.10. + + .. versionadded:: 4.4.2 + """ + cookiedict = {} + for chunk in cookie.split(str(';')): + if str('=') in chunk: + key, val = chunk.split(str('='), 1) + else: + # Assume an empty name per + # https://bugzilla.mozilla.org/show_bug.cgi?id=169091 + key, val = str(''), chunk + key, val = key.strip(), val.strip() + if key or val: + # unquote using Python's algorithm. + cookiedict[key] = _unquote_cookie(val) + return cookiedict diff --git a/server/www/packages/packages-windows/x86/tornado/ioloop.py b/server/www/packages/packages-windows/x86/tornado/ioloop.py new file mode 100644 index 0000000..889153a --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/ioloop.py @@ -0,0 +1,1267 @@ +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""An I/O event loop for non-blocking sockets. + +On Python 3, `.IOLoop` is a wrapper around the `asyncio` event loop. + +Typical applications will use a single `IOLoop` object, accessed via +`IOLoop.current` class method. The `IOLoop.start` method (or +equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually +be called at the end of the ``main()`` function. Atypical applications +may use more than one `IOLoop`, such as one `IOLoop` per thread, or +per `unittest` case. + +In addition to I/O events, the `IOLoop` can also schedule time-based +events. `IOLoop.add_timeout` is a non-blocking alternative to +`time.sleep`. 
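+
+For example (a minimal sketch; ``hello`` is a placeholder callback)::
+
+    from tornado.ioloop import IOLoop
+
+    def hello():
+        print("hello")
+
+    loop = IOLoop.current()
+    loop.add_timeout(loop.time() + 1, hello)  # fires ~1s after start()
+    loop.start()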
+ +""" + +from __future__ import absolute_import, division, print_function + +import collections +import datetime +import errno +import functools +import heapq +import itertools +import logging +import numbers +import os +import select +import sys +import threading +import time +import traceback +import math +import random + +from tornado.concurrent import Future, is_future, chain_future, future_set_exc_info, future_add_done_callback # noqa: E501 +from tornado.log import app_log, gen_log +from tornado.platform.auto import set_close_exec, Waker +from tornado import stack_context +from tornado.util import ( + PY3, Configurable, errno_from_exception, timedelta_to_seconds, + TimeoutError, unicode_type, import_object, +) + +try: + import signal +except ImportError: + signal = None + +try: + from concurrent.futures import ThreadPoolExecutor +except ImportError: + ThreadPoolExecutor = None + +if PY3: + import _thread as thread +else: + import thread + +try: + import asyncio +except ImportError: + asyncio = None + + +_POLL_TIMEOUT = 3600.0 + + +class IOLoop(Configurable): + """A level-triggered I/O loop. + + On Python 3, `IOLoop` is a wrapper around the `asyncio` event + loop. On Python 2, it uses ``epoll`` (Linux) or ``kqueue`` (BSD + and Mac OS X) if they are available, or else we fall back on + select(). If you are implementing a system that needs to handle + thousands of simultaneous connections, you should use a system + that supports either ``epoll`` or ``kqueue``. + + Example usage for a simple TCP server: + + .. testcode:: + + import errno + import functools + import socket + + import tornado.ioloop + from tornado.iostream import IOStream + + async def handle_connection(connection, address): + stream = IOStream(connection) + message = await stream.read_until_close() + print("message from client:", message.decode().strip()) + + def connection_ready(sock, fd, events): + while True: + try: + connection, address = sock.accept() + except socket.error as e: + if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN): + raise + return + connection.setblocking(0) + io_loop = tornado.ioloop.IOLoop.current() + io_loop.spawn_callback(handle_connection, connection, address) + + if __name__ == '__main__': + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.setblocking(0) + sock.bind(("", 8888)) + sock.listen(128) + + io_loop = tornado.ioloop.IOLoop.current() + callback = functools.partial(connection_ready, sock) + io_loop.add_handler(sock.fileno(), callback, io_loop.READ) + io_loop.start() + + .. testoutput:: + :hide: + + By default, a newly-constructed `IOLoop` becomes the thread's current + `IOLoop`, unless there already is a current `IOLoop`. This behavior + can be controlled with the ``make_current`` argument to the `IOLoop` + constructor: if ``make_current=True``, the new `IOLoop` will always + try to become current and it raises an error if there is already a + current instance. If ``make_current=False``, the new `IOLoop` will + not try to become current. + + In general, an `IOLoop` cannot survive a fork or be shared across + processes in any way. When multiple processes are being used, each + process should create its own `IOLoop`, which also implies that + any objects which depend on the `IOLoop` (such as + `.AsyncHTTPClient`) must also be created in the child processes. 
+ As a guideline, anything that starts processes (including the + `tornado.process` and `multiprocessing` modules) should do so as + early as possible, ideally the first thing the application does + after loading its configuration in ``main()``. + + .. versionchanged:: 4.2 + Added the ``make_current`` keyword argument to the `IOLoop` + constructor. + + .. versionchanged:: 5.0 + + Uses the `asyncio` event loop by default. The + ``IOLoop.configure`` method cannot be used on Python 3 except + to redundantly specify the `asyncio` event loop. + + """ + # Constants from the epoll module + _EPOLLIN = 0x001 + _EPOLLPRI = 0x002 + _EPOLLOUT = 0x004 + _EPOLLERR = 0x008 + _EPOLLHUP = 0x010 + _EPOLLRDHUP = 0x2000 + _EPOLLONESHOT = (1 << 30) + _EPOLLET = (1 << 31) + + # Our events map exactly to the epoll events + NONE = 0 + READ = _EPOLLIN + WRITE = _EPOLLOUT + ERROR = _EPOLLERR | _EPOLLHUP + + # In Python 2, _current.instance points to the current IOLoop. + _current = threading.local() + + # In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops. + _ioloop_for_asyncio = dict() + + @classmethod + def configure(cls, impl, **kwargs): + if asyncio is not None: + from tornado.platform.asyncio import BaseAsyncIOLoop + + if isinstance(impl, (str, unicode_type)): + impl = import_object(impl) + if not issubclass(impl, BaseAsyncIOLoop): + raise RuntimeError( + "only AsyncIOLoop is allowed when asyncio is available") + super(IOLoop, cls).configure(impl, **kwargs) + + @staticmethod + def instance(): + """Deprecated alias for `IOLoop.current()`. + + .. versionchanged:: 5.0 + + Previously, this method returned a global singleton + `IOLoop`, in contrast with the per-thread `IOLoop` returned + by `current()`. In nearly all cases the two were the same + (when they differed, it was generally used from non-Tornado + threads to communicate back to the main thread's `IOLoop`). + This distinction is not present in `asyncio`, so in order + to facilitate integration with that package `instance()` + was changed to be an alias to `current()`. Applications + using the cross-thread communications aspect of + `instance()` should instead set their own global variable + to point to the `IOLoop` they want to use. + + .. deprecated:: 5.0 + """ + return IOLoop.current() + + def install(self): + """Deprecated alias for `make_current()`. + + .. versionchanged:: 5.0 + + Previously, this method would set this `IOLoop` as the + global singleton used by `IOLoop.instance()`. Now that + `instance()` is an alias for `current()`, `install()` + is an alias for `make_current()`. + + .. deprecated:: 5.0 + """ + self.make_current() + + @staticmethod + def clear_instance(): + """Deprecated alias for `clear_current()`. + + .. versionchanged:: 5.0 + + Previously, this method would clear the `IOLoop` used as + the global singleton by `IOLoop.instance()`. Now that + `instance()` is an alias for `current()`, + `clear_instance()` is an alias for `clear_current()`. + + .. deprecated:: 5.0 + + """ + IOLoop.clear_current() + + @staticmethod + def current(instance=True): + """Returns the current thread's `IOLoop`. + + If an `IOLoop` is currently running or has been marked as + current by `make_current`, returns that instance. If there is + no current `IOLoop` and ``instance`` is true, creates one. + + .. versionchanged:: 4.1 + Added ``instance`` argument to control the fallback to + `IOLoop.instance()`. + .. 
versionchanged:: 5.0 + On Python 3, control of the current `IOLoop` is delegated + to `asyncio`, with this and other methods as pass-through accessors. + The ``instance`` argument now controls whether an `IOLoop` + is created automatically when there is none, instead of + whether we fall back to `IOLoop.instance()` (which is now + an alias for this method). ``instance=False`` is deprecated, + since even if we do not create an `IOLoop`, this method + may initialize the asyncio loop. + """ + if asyncio is None: + current = getattr(IOLoop._current, "instance", None) + if current is None and instance: + current = IOLoop() + if IOLoop._current.instance is not current: + raise RuntimeError("new IOLoop did not become current") + else: + try: + loop = asyncio.get_event_loop() + except (RuntimeError, AssertionError): + if not instance: + return None + raise + try: + return IOLoop._ioloop_for_asyncio[loop] + except KeyError: + if instance: + from tornado.platform.asyncio import AsyncIOMainLoop + current = AsyncIOMainLoop(make_current=True) + else: + current = None + return current + + def make_current(self): + """Makes this the `IOLoop` for the current thread. + + An `IOLoop` automatically becomes current for its thread + when it is started, but it is sometimes useful to call + `make_current` explicitly before starting the `IOLoop`, + so that code run at startup time can find the right + instance. + + .. versionchanged:: 4.1 + An `IOLoop` created while there is no current `IOLoop` + will automatically become current. + + .. versionchanged:: 5.0 + This method also sets the current `asyncio` event loop. + """ + # The asyncio event loops override this method. + assert asyncio is None + old = getattr(IOLoop._current, "instance", None) + if old is not None: + old.clear_current() + IOLoop._current.instance = self + + @staticmethod + def clear_current(): + """Clears the `IOLoop` for the current thread. + + Intended primarily for use by test frameworks in between tests. + + .. versionchanged:: 5.0 + This method also clears the current `asyncio` event loop. + """ + old = IOLoop.current(instance=False) + if old is not None: + old._clear_current_hook() + if asyncio is None: + IOLoop._current.instance = None + + def _clear_current_hook(self): + """Instance method called when an IOLoop ceases to be current. + + May be overridden by subclasses as a counterpart to make_current. + """ + pass + + @classmethod + def configurable_base(cls): + return IOLoop + + @classmethod + def configurable_default(cls): + if asyncio is not None: + from tornado.platform.asyncio import AsyncIOLoop + return AsyncIOLoop + return PollIOLoop + + def initialize(self, make_current=None): + if make_current is None: + if IOLoop.current(instance=False) is None: + self.make_current() + elif make_current: + current = IOLoop.current(instance=False) + # AsyncIO loops can already be current by this point. + if current is not None and current is not self: + raise RuntimeError("current IOLoop already exists") + self.make_current() + + def close(self, all_fds=False): + """Closes the `IOLoop`, freeing any resources used. + + If ``all_fds`` is true, all file descriptors registered on the + IOLoop will be closed (not just the ones created by the + `IOLoop` itself). + + Many applications will only use a single `IOLoop` that runs for the + entire lifetime of the process. In that case closing the `IOLoop` + is not necessary since everything will be cleaned up when the + process exits. 
`IOLoop.close` is provided mainly for scenarios
+        such as unit tests, which create and destroy a large number of
+        ``IOLoops``.
+
+        An `IOLoop` must be completely stopped before it can be closed. This
+        means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
+        be allowed to return before attempting to call `IOLoop.close()`.
+        Therefore the call to `close` will usually appear just after
+        the call to `start` rather than near the call to `stop`.
+
+        .. versionchanged:: 3.1
+           If the `IOLoop` implementation supports non-integer objects
+           for "file descriptors", those objects will have their
+           ``close`` method called when ``all_fds`` is true.
+        """
+        raise NotImplementedError()
+
+    def add_handler(self, fd, handler, events):
+        """Registers the given handler to receive the given events for ``fd``.
+
+        The ``fd`` argument may either be an integer file descriptor or
+        a file-like object with a ``fileno()`` method (and optionally a
+        ``close()`` method, which may be called when the `IOLoop` is shut
+        down).
+
+        The ``events`` argument is a bitwise or of the constants
+        ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
+
+        When an event occurs, ``handler(fd, events)`` will be run.
+
+        .. versionchanged:: 4.0
+           Added the ability to pass file-like objects in addition to
+           raw file descriptors.
+        """
+        raise NotImplementedError()
+
+    def update_handler(self, fd, events):
+        """Changes the events we listen for on ``fd``.
+
+        .. versionchanged:: 4.0
+           Added the ability to pass file-like objects in addition to
+           raw file descriptors.
+        """
+        raise NotImplementedError()
+
+    def remove_handler(self, fd):
+        """Stop listening for events on ``fd``.
+
+        .. versionchanged:: 4.0
+           Added the ability to pass file-like objects in addition to
+           raw file descriptors.
+        """
+        raise NotImplementedError()
+
+    def set_blocking_signal_threshold(self, seconds, action):
+        """Sends a signal if the `IOLoop` is blocked for more than
+        ``seconds`` seconds.
+
+        Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
+        platform.
+
+        The action parameter is a Python signal handler. Read the
+        documentation for the `signal` module for more information.
+        If ``action`` is None, the process will be killed if it is
+        blocked for too long.
+
+        .. deprecated:: 5.0
+
+           Not implemented on the `asyncio` event loop. Use the environment
+           variable ``PYTHONASYNCIODEBUG=1`` instead. This method will be
+           removed in Tornado 6.0.
+        """
+        raise NotImplementedError()
+
+    def set_blocking_log_threshold(self, seconds):
+        """Logs a stack trace if the `IOLoop` is blocked for more than
+        ``seconds`` seconds.
+
+        Equivalent to ``set_blocking_signal_threshold(seconds,
+        self.log_stack)``
+
+        .. deprecated:: 5.0
+
+           Not implemented on the `asyncio` event loop. Use the environment
+           variable ``PYTHONASYNCIODEBUG=1`` instead. This method will be
+           removed in Tornado 6.0.
+        """
+        self.set_blocking_signal_threshold(seconds, self.log_stack)
+
+    def log_stack(self, signal, frame):
+        """Signal handler to log the stack trace of the current thread.
+
+        For use with `set_blocking_signal_threshold`.
+
+        .. deprecated:: 5.1
+
+           This method will be removed in Tornado 6.0.
+        """
+        gen_log.warning('IOLoop blocked for %f seconds in\n%s',
+                        self._blocking_signal_threshold,
+                        ''.join(traceback.format_stack(frame)))
+
+    def start(self):
+        """Starts the I/O loop.
+
+        The loop will run until one of the callbacks calls `stop()`, which
+        will make the loop stop after the current event iteration completes.
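+
+        A minimal sketch of the start/stop life cycle::
+
+            loop = IOLoop.current()
+            loop.add_callback(loop.stop)  # schedule an immediate stop
+            loop.start()                  # returns once stop() has run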
+ """ + raise NotImplementedError() + + def _setup_logging(self): + """The IOLoop catches and logs exceptions, so it's + important that log output be visible. However, python's + default behavior for non-root loggers (prior to python + 3.2) is to print an unhelpful "no handlers could be + found" message rather than the actual log entry, so we + must explicitly configure logging if we've made it this + far without anything. + + This method should be called from start() in subclasses. + """ + if not any([logging.getLogger().handlers, + logging.getLogger('tornado').handlers, + logging.getLogger('tornado.application').handlers]): + logging.basicConfig() + + def stop(self): + """Stop the I/O loop. + + If the event loop is not currently running, the next call to `start()` + will return immediately. + + Note that even after `stop` has been called, the `IOLoop` is not + completely stopped until `IOLoop.start` has also returned. + Some work that was scheduled before the call to `stop` may still + be run before the `IOLoop` shuts down. + """ + raise NotImplementedError() + + def run_sync(self, func, timeout=None): + """Starts the `IOLoop`, runs the given function, and stops the loop. + + The function must return either an awaitable object or + ``None``. If the function returns an awaitable object, the + `IOLoop` will run until the awaitable is resolved (and + `run_sync()` will return the awaitable's result). If it raises + an exception, the `IOLoop` will stop and the exception will be + re-raised to the caller. + + The keyword-only argument ``timeout`` may be used to set + a maximum duration for the function. If the timeout expires, + a `tornado.util.TimeoutError` is raised. + + This method is useful to allow asynchronous calls in a + ``main()`` function:: + + async def main(): + # do stuff... + + if __name__ == '__main__': + IOLoop.current().run_sync(main) + + .. versionchanged:: 4.3 + Returning a non-``None``, non-awaitable value is now an error. + + .. versionchanged:: 5.0 + If a timeout occurs, the ``func`` coroutine will be cancelled. + + """ + future_cell = [None] + + def run(): + try: + result = func() + if result is not None: + from tornado.gen import convert_yielded + result = convert_yielded(result) + except Exception: + future_cell[0] = Future() + future_set_exc_info(future_cell[0], sys.exc_info()) + else: + if is_future(result): + future_cell[0] = result + else: + future_cell[0] = Future() + future_cell[0].set_result(result) + self.add_future(future_cell[0], lambda future: self.stop()) + self.add_callback(run) + if timeout is not None: + def timeout_callback(): + # If we can cancel the future, do so and wait on it. If not, + # Just stop the loop and return with the task still pending. + # (If we neither cancel nor wait for the task, a warning + # will be logged). + if not future_cell[0].cancel(): + self.stop() + timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback) + self.start() + if timeout is not None: + self.remove_timeout(timeout_handle) + if future_cell[0].cancelled() or not future_cell[0].done(): + raise TimeoutError('Operation timed out after %s seconds' % timeout) + return future_cell[0].result() + + def time(self): + """Returns the current time according to the `IOLoop`'s clock. + + The return value is a floating-point number relative to an + unspecified time in the past. + + By default, the `IOLoop`'s time function is `time.time`. However, + it may be configured to use e.g. `time.monotonic` instead. 
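+
+        For example (sketch; ``callback`` is a placeholder)::
+
+            deadline = io_loop.time() + 30  # 30 seconds from now
+            io_loop.add_timeout(deadline, callback)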
+ Calls to `add_timeout` that pass a number instead of a + `datetime.timedelta` should use this function to compute the + appropriate time, so they can work no matter what time function + is chosen. + """ + return time.time() + + def add_timeout(self, deadline, callback, *args, **kwargs): + """Runs the ``callback`` at the time ``deadline`` from the I/O loop. + + Returns an opaque handle that may be passed to + `remove_timeout` to cancel. + + ``deadline`` may be a number denoting a time (on the same + scale as `IOLoop.time`, normally `time.time`), or a + `datetime.timedelta` object for a deadline relative to the + current time. Since Tornado 4.0, `call_later` is a more + convenient alternative for the relative case since it does not + require a timedelta object. + + Note that it is not safe to call `add_timeout` from other threads. + Instead, you must use `add_callback` to transfer control to the + `IOLoop`'s thread, and then call `add_timeout` from there. + + Subclasses of IOLoop must implement either `add_timeout` or + `call_at`; the default implementations of each will call + the other. `call_at` is usually easier to implement, but + subclasses that wish to maintain compatibility with Tornado + versions prior to 4.0 must use `add_timeout` instead. + + .. versionchanged:: 4.0 + Now passes through ``*args`` and ``**kwargs`` to the callback. + """ + if isinstance(deadline, numbers.Real): + return self.call_at(deadline, callback, *args, **kwargs) + elif isinstance(deadline, datetime.timedelta): + return self.call_at(self.time() + timedelta_to_seconds(deadline), + callback, *args, **kwargs) + else: + raise TypeError("Unsupported deadline %r" % deadline) + + def call_later(self, delay, callback, *args, **kwargs): + """Runs the ``callback`` after ``delay`` seconds have passed. + + Returns an opaque handle that may be passed to `remove_timeout` + to cancel. Note that unlike the `asyncio` method of the same + name, the returned object does not have a ``cancel()`` method. + + See `add_timeout` for comments on thread-safety and subclassing. + + .. versionadded:: 4.0 + """ + return self.call_at(self.time() + delay, callback, *args, **kwargs) + + def call_at(self, when, callback, *args, **kwargs): + """Runs the ``callback`` at the absolute time designated by ``when``. + + ``when`` must be a number using the same reference point as + `IOLoop.time`. + + Returns an opaque handle that may be passed to `remove_timeout` + to cancel. Note that unlike the `asyncio` method of the same + name, the returned object does not have a ``cancel()`` method. + + See `add_timeout` for comments on thread-safety and subclassing. + + .. versionadded:: 4.0 + """ + return self.add_timeout(when, callback, *args, **kwargs) + + def remove_timeout(self, timeout): + """Cancels a pending timeout. + + The argument is a handle as returned by `add_timeout`. It is + safe to call `remove_timeout` even if the callback has already + been run. + """ + raise NotImplementedError() + + def add_callback(self, callback, *args, **kwargs): + """Calls the given callback on the next I/O loop iteration. + + It is safe to call this method from any thread at any time, + except from a signal handler. Note that this is the **only** + method in `IOLoop` that makes this thread-safety guarantee; all + other interaction with the `IOLoop` must be done from that + `IOLoop`'s thread. `add_callback()` may be used to transfer + control from other threads to the `IOLoop`'s thread. + + To add a callback from a signal handler, see + `add_callback_from_signal`. 
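+
+        A typical cross-thread hand-off (sketch; names other than
+        ``add_callback`` are placeholders)::
+
+            loop = IOLoop.current()  # captured on the IOLoop's thread
+
+            def on_worker_thread(result):
+                # The only IOLoop method that is safe to call here:
+                loop.add_callback(handle_result, result)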
+ """ + raise NotImplementedError() + + def add_callback_from_signal(self, callback, *args, **kwargs): + """Calls the given callback on the next I/O loop iteration. + + Safe for use from a Python signal handler; should not be used + otherwise. + + Callbacks added with this method will be run without any + `.stack_context`, to avoid picking up the context of the function + that was interrupted by the signal. + """ + raise NotImplementedError() + + def spawn_callback(self, callback, *args, **kwargs): + """Calls the given callback on the next IOLoop iteration. + + Unlike all other callback-related methods on IOLoop, + ``spawn_callback`` does not associate the callback with its caller's + ``stack_context``, so it is suitable for fire-and-forget callbacks + that should not interfere with the caller. + + .. versionadded:: 4.0 + """ + with stack_context.NullContext(): + self.add_callback(callback, *args, **kwargs) + + def add_future(self, future, callback): + """Schedules a callback on the ``IOLoop`` when the given + `.Future` is finished. + + The callback is invoked with one argument, the + `.Future`. + + This method only accepts `.Future` objects and not other + awaitables (unlike most of Tornado where the two are + interchangeable). + """ + assert is_future(future) + callback = stack_context.wrap(callback) + future_add_done_callback( + future, lambda future: self.add_callback(callback, future)) + + def run_in_executor(self, executor, func, *args): + """Runs a function in a ``concurrent.futures.Executor``. If + ``executor`` is ``None``, the IO loop's default executor will be used. + + Use `functools.partial` to pass keyword arguments to ``func``. + + .. versionadded:: 5.0 + """ + if ThreadPoolExecutor is None: + raise RuntimeError( + "concurrent.futures is required to use IOLoop.run_in_executor") + + if executor is None: + if not hasattr(self, '_executor'): + from tornado.process import cpu_count + self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5)) + executor = self._executor + c_future = executor.submit(func, *args) + # Concurrent Futures are not usable with await. Wrap this in a + # Tornado Future instead, using self.add_future for thread-safety. + t_future = Future() + self.add_future(c_future, lambda f: chain_future(f, t_future)) + return t_future + + def set_default_executor(self, executor): + """Sets the default executor to use with :meth:`run_in_executor`. + + .. versionadded:: 5.0 + """ + self._executor = executor + + def _run_callback(self, callback): + """Runs a callback with error handling. + + For use in subclasses. + """ + try: + ret = callback() + if ret is not None: + from tornado import gen + # Functions that return Futures typically swallow all + # exceptions and store them in the Future. If a Future + # makes it out to the IOLoop, ensure its exception (if any) + # gets logged too. + try: + ret = gen.convert_yielded(ret) + except gen.BadYieldError: + # It's not unusual for add_callback to be used with + # methods returning a non-None and non-yieldable + # result, which should just be ignored. + pass + else: + self.add_future(ret, self._discard_future_result) + except Exception: + self.handle_callback_exception(callback) + + def _discard_future_result(self, future): + """Avoid unhandled-exception warnings from spawned coroutines.""" + future.result() + + def handle_callback_exception(self, callback): + """This method is called whenever a callback run by the `IOLoop` + throws an exception. + + By default simply logs the exception as an error. 
Subclasses
+        may override this method to customize reporting of exceptions.
+
+        The exception itself is not passed explicitly, but is available
+        in `sys.exc_info`.
+
+        .. versionchanged:: 5.0
+
+           When the `asyncio` event loop is used (which is now the
+           default on Python 3), some callback errors will be handled by
+           `asyncio` instead of this method.
+
+        .. deprecated:: 5.1
+
+           Support for this method will be removed in Tornado 6.0.
+        """
+        app_log.error("Exception in callback %r", callback, exc_info=True)
+
+    def split_fd(self, fd):
+        """Returns an (fd, obj) pair from an ``fd`` parameter.
+
+        We accept both raw file descriptors and file-like objects as
+        input to `add_handler` and related methods. When a file-like
+        object is passed, we must retain the object itself so we can
+        close it correctly when the `IOLoop` shuts down, but the
+        poller interfaces favor file descriptors (they will accept
+        file-like objects and call ``fileno()`` for you, but they
+        always return the descriptor itself).
+
+        This method is provided for use by `IOLoop` subclasses and should
+        not generally be used by application code.
+
+        .. versionadded:: 4.0
+        """
+        try:
+            return fd.fileno(), fd
+        except AttributeError:
+            return fd, fd
+
+    def close_fd(self, fd):
+        """Utility method to close an ``fd``.
+
+        If ``fd`` is a file-like object, we close it directly; otherwise
+        we use `os.close`.
+
+        This method is provided for use by `IOLoop` subclasses (in
+        implementations of ``IOLoop.close(all_fds=True)``) and should
+        not generally be used by application code.
+
+        .. versionadded:: 4.0
+        """
+        try:
+            try:
+                fd.close()
+            except AttributeError:
+                os.close(fd)
+        except OSError:
+            pass
+
+
+class PollIOLoop(IOLoop):
+    """Base class for IOLoops built around a select-like function.
+
+    For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
+    (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
+    `tornado.platform.select.SelectIOLoop` (all platforms).
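+
+    The concrete class is normally selected automatically (see
+    ``configurable_default`` below); to force an implementation
+    explicitly, a sketch using the standard `.Configurable` machinery::
+
+        from tornado.ioloop import IOLoop
+        # 'tornado.platform.select.SelectIOLoop' is one of the
+        # implementations named above; choose per platform.
+        IOLoop.configure('tornado.platform.select.SelectIOLoop')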
+ """ + def initialize(self, impl, time_func=None, **kwargs): + super(PollIOLoop, self).initialize(**kwargs) + self._impl = impl + if hasattr(self._impl, 'fileno'): + set_close_exec(self._impl.fileno()) + self.time_func = time_func or time.time + self._handlers = {} + self._events = {} + self._callbacks = collections.deque() + self._timeouts = [] + self._cancellations = 0 + self._running = False + self._stopped = False + self._closing = False + self._thread_ident = None + self._pid = os.getpid() + self._blocking_signal_threshold = None + self._timeout_counter = itertools.count() + + # Create a pipe that we send bogus data to when we want to wake + # the I/O loop when it is idle + self._waker = Waker() + self.add_handler(self._waker.fileno(), + lambda fd, events: self._waker.consume(), + self.READ) + + @classmethod + def configurable_base(cls): + return PollIOLoop + + @classmethod + def configurable_default(cls): + if hasattr(select, "epoll"): + from tornado.platform.epoll import EPollIOLoop + return EPollIOLoop + if hasattr(select, "kqueue"): + # Python 2.6+ on BSD or Mac + from tornado.platform.kqueue import KQueueIOLoop + return KQueueIOLoop + from tornado.platform.select import SelectIOLoop + return SelectIOLoop + + def close(self, all_fds=False): + self._closing = True + self.remove_handler(self._waker.fileno()) + if all_fds: + for fd, handler in list(self._handlers.values()): + self.close_fd(fd) + self._waker.close() + self._impl.close() + self._callbacks = None + self._timeouts = None + if hasattr(self, '_executor'): + self._executor.shutdown() + + def add_handler(self, fd, handler, events): + fd, obj = self.split_fd(fd) + self._handlers[fd] = (obj, stack_context.wrap(handler)) + self._impl.register(fd, events | self.ERROR) + + def update_handler(self, fd, events): + fd, obj = self.split_fd(fd) + self._impl.modify(fd, events | self.ERROR) + + def remove_handler(self, fd): + fd, obj = self.split_fd(fd) + self._handlers.pop(fd, None) + self._events.pop(fd, None) + try: + self._impl.unregister(fd) + except Exception: + gen_log.debug("Error deleting fd from IOLoop", exc_info=True) + + def set_blocking_signal_threshold(self, seconds, action): + if not hasattr(signal, "setitimer"): + gen_log.error("set_blocking_signal_threshold requires a signal module " + "with the setitimer method") + return + self._blocking_signal_threshold = seconds + if seconds is not None: + signal.signal(signal.SIGALRM, + action if action is not None else signal.SIG_DFL) + + def start(self): + if self._running: + raise RuntimeError("IOLoop is already running") + if os.getpid() != self._pid: + raise RuntimeError("Cannot share PollIOLoops across processes") + self._setup_logging() + if self._stopped: + self._stopped = False + return + old_current = IOLoop.current(instance=False) + if old_current is not self: + self.make_current() + self._thread_ident = thread.get_ident() + self._running = True + + # signal.set_wakeup_fd closes a race condition in event loops: + # a signal may arrive at the beginning of select/poll/etc + # before it goes into its interruptible sleep, so the signal + # will be consumed without waking the select. The solution is + # for the (C, synchronous) signal handler to write to a pipe, + # which will then be seen by select. + # + # In python's signal handling semantics, this only matters on the + # main thread (fortunately, set_wakeup_fd only works on the main + # thread and will raise a ValueError otherwise). + # + # If someone has already set a wakeup fd, we don't want to + # disturb it. 
This is an issue for twisted, which does its + # SIGCHLD processing in response to its own wakeup fd being + # written to. As long as the wakeup fd is registered on the IOLoop, + # the loop will still wake up and everything should work. + old_wakeup_fd = None + if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix': + # requires python 2.6+, unix. set_wakeup_fd exists but crashes + # the python process on windows. + try: + old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno()) + if old_wakeup_fd != -1: + # Already set, restore previous value. This is a little racy, + # but there's no clean get_wakeup_fd and in real use the + # IOLoop is just started once at the beginning. + signal.set_wakeup_fd(old_wakeup_fd) + old_wakeup_fd = None + except ValueError: + # Non-main thread, or the previous value of wakeup_fd + # is no longer valid. + old_wakeup_fd = None + + try: + while True: + # Prevent IO event starvation by delaying new callbacks + # to the next iteration of the event loop. + ncallbacks = len(self._callbacks) + + # Add any timeouts that have come due to the callback list. + # Do not run anything until we have determined which ones + # are ready, so timeouts that call add_timeout cannot + # schedule anything in this iteration. + due_timeouts = [] + if self._timeouts: + now = self.time() + while self._timeouts: + if self._timeouts[0].callback is None: + # The timeout was cancelled. Note that the + # cancellation check is repeated below for timeouts + # that are cancelled by another timeout or callback. + heapq.heappop(self._timeouts) + self._cancellations -= 1 + elif self._timeouts[0].deadline <= now: + due_timeouts.append(heapq.heappop(self._timeouts)) + else: + break + if (self._cancellations > 512 and + self._cancellations > (len(self._timeouts) >> 1)): + # Clean up the timeout queue when it gets large and it's + # more than half cancellations. + self._cancellations = 0 + self._timeouts = [x for x in self._timeouts + if x.callback is not None] + heapq.heapify(self._timeouts) + + for i in range(ncallbacks): + self._run_callback(self._callbacks.popleft()) + for timeout in due_timeouts: + if timeout.callback is not None: + self._run_callback(timeout.callback) + # Closures may be holding on to a lot of memory, so allow + # them to be freed before we go into our poll wait. + due_timeouts = timeout = None + + if self._callbacks: + # If any callbacks or timeouts called add_callback, + # we don't want to wait in poll() before we run them. + poll_timeout = 0.0 + elif self._timeouts: + # If there are any timeouts, schedule the first one. + # Use self.time() instead of 'now' to account for time + # spent running callbacks. + poll_timeout = self._timeouts[0].deadline - self.time() + poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT)) + else: + # No timeouts and no callbacks, so use the default. + poll_timeout = _POLL_TIMEOUT + + if not self._running: + break + + if self._blocking_signal_threshold is not None: + # clear alarm so it doesn't fire while poll is waiting for + # events. 
+ signal.setitimer(signal.ITIMER_REAL, 0, 0) + + try: + event_pairs = self._impl.poll(poll_timeout) + except Exception as e: + # Depending on python version and IOLoop implementation, + # different exception types may be thrown and there are + # two ways EINTR might be signaled: + # * e.errno == errno.EINTR + # * e.args is like (errno.EINTR, 'Interrupted system call') + if errno_from_exception(e) == errno.EINTR: + continue + else: + raise + + if self._blocking_signal_threshold is not None: + signal.setitimer(signal.ITIMER_REAL, + self._blocking_signal_threshold, 0) + + # Pop one fd at a time from the set of pending fds and run + # its handler. Since that handler may perform actions on + # other file descriptors, there may be reentrant calls to + # this IOLoop that modify self._events + self._events.update(event_pairs) + while self._events: + fd, events = self._events.popitem() + try: + fd_obj, handler_func = self._handlers[fd] + handler_func(fd_obj, events) + except (OSError, IOError) as e: + if errno_from_exception(e) == errno.EPIPE: + # Happens when the client closes the connection + pass + else: + self.handle_callback_exception(self._handlers.get(fd)) + except Exception: + self.handle_callback_exception(self._handlers.get(fd)) + fd_obj = handler_func = None + + finally: + # reset the stopped flag so another start/stop pair can be issued + self._stopped = False + if self._blocking_signal_threshold is not None: + signal.setitimer(signal.ITIMER_REAL, 0, 0) + if old_current is None: + IOLoop.clear_current() + elif old_current is not self: + old_current.make_current() + if old_wakeup_fd is not None: + signal.set_wakeup_fd(old_wakeup_fd) + + def stop(self): + self._running = False + self._stopped = True + self._waker.wake() + + def time(self): + return self.time_func() + + def call_at(self, deadline, callback, *args, **kwargs): + timeout = _Timeout( + deadline, + functools.partial(stack_context.wrap(callback), *args, **kwargs), + self) + heapq.heappush(self._timeouts, timeout) + return timeout + + def remove_timeout(self, timeout): + # Removing from a heap is complicated, so just leave the defunct + # timeout object in the queue (see discussion in + # http://docs.python.org/library/heapq.html). + # If this turns out to be a problem, we could add a garbage + # collection pass whenever there are too many dead timeouts. + timeout.callback = None + self._cancellations += 1 + + def add_callback(self, callback, *args, **kwargs): + if self._closing: + return + # Blindly insert into self._callbacks. This is safe even + # from signal handlers because deque.append is atomic. + self._callbacks.append(functools.partial( + stack_context.wrap(callback), *args, **kwargs)) + if thread.get_ident() != self._thread_ident: + # This will write one byte but Waker.consume() reads many + # at once, so it's ok to write even when not strictly + # necessary. + self._waker.wake() + else: + # If we're on the IOLoop's thread, we don't need to wake anyone. 
+ pass + + def add_callback_from_signal(self, callback, *args, **kwargs): + with stack_context.NullContext(): + self.add_callback(callback, *args, **kwargs) + + +class _Timeout(object): + """An IOLoop timeout, a UNIX timestamp and a callback""" + + # Reduce memory overhead when there are lots of pending callbacks + __slots__ = ['deadline', 'callback', 'tdeadline'] + + def __init__(self, deadline, callback, io_loop): + if not isinstance(deadline, numbers.Real): + raise TypeError("Unsupported deadline %r" % deadline) + self.deadline = deadline + self.callback = callback + self.tdeadline = (deadline, next(io_loop._timeout_counter)) + + # Comparison methods to sort by deadline, with object id as a tiebreaker + # to guarantee a consistent ordering. The heapq module uses __le__ + # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons + # use __lt__). + def __lt__(self, other): + return self.tdeadline < other.tdeadline + + def __le__(self, other): + return self.tdeadline <= other.tdeadline + + +class PeriodicCallback(object): + """Schedules the given callback to be called periodically. + + The callback is called every ``callback_time`` milliseconds. + Note that the timeout is given in milliseconds, while most other + time-related functions in Tornado use seconds. + + If ``jitter`` is specified, each callback time will be randomly selected + within a window of ``jitter * callback_time`` milliseconds. + Jitter can be used to reduce alignment of events with similar periods. + A jitter of 0.1 means allowing a 10% variation in callback time. + The window is centered on ``callback_time`` so the total number of calls + within a given interval should not be significantly affected by adding + jitter. + + If the callback runs for longer than ``callback_time`` milliseconds, + subsequent invocations will be skipped to get back on schedule. + + `start` must be called after the `PeriodicCallback` is created. + + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + + .. versionchanged:: 5.1 + The ``jitter`` argument is added. + """ + def __init__(self, callback, callback_time, jitter=0): + self.callback = callback + if callback_time <= 0: + raise ValueError("Periodic callback must have a positive callback_time") + self.callback_time = callback_time + self.jitter = jitter + self._running = False + self._timeout = None + + def start(self): + """Starts the timer.""" + # Looking up the IOLoop here allows to first instantiate the + # PeriodicCallback in another thread, then start it using + # IOLoop.add_callback(). + self.io_loop = IOLoop.current() + self._running = True + self._next_timeout = self.io_loop.time() + self._schedule_next() + + def stop(self): + """Stops the timer.""" + self._running = False + if self._timeout is not None: + self.io_loop.remove_timeout(self._timeout) + self._timeout = None + + def is_running(self): + """Return True if this `.PeriodicCallback` has been started. + + .. 
versionadded:: 4.1 + """ + return self._running + + def _run(self): + if not self._running: + return + try: + return self.callback() + except Exception: + self.io_loop.handle_callback_exception(self.callback) + finally: + self._schedule_next() + + def _schedule_next(self): + if self._running: + self._update_next(self.io_loop.time()) + self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run) + + def _update_next(self, current_time): + callback_time_sec = self.callback_time / 1000.0 + if self.jitter: + # apply jitter fraction + callback_time_sec *= 1 + (self.jitter * (random.random() - 0.5)) + if self._next_timeout <= current_time: + # The period should be measured from the start of one call + # to the start of the next. If one call takes too long, + # skip cycles to get back to a multiple of the original + # schedule. + self._next_timeout += (math.floor((current_time - self._next_timeout) / + callback_time_sec) + 1) * callback_time_sec + else: + # If the clock moved backwards, ensure we advance the next + # timeout instead of recomputing the same value again. + # This may result in long gaps between callbacks if the + # clock jumps backwards by a lot, but the far more common + # scenario is a small NTP adjustment that should just be + # ignored. + # + # Note that on some systems if time.time() runs slower + # than time.monotonic() (most common on windows), we + # effectively experience a small backwards time jump on + # every iteration because PeriodicCallback uses + # time.time() while asyncio schedules callbacks using + # time.monotonic(). + # https://github.com/tornadoweb/tornado/issues/2333 + self._next_timeout += callback_time_sec diff --git a/server/www/packages/packages-windows/x86/tornado/iostream.py b/server/www/packages/packages-windows/x86/tornado/iostream.py new file mode 100644 index 0000000..89e1e23 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/iostream.py @@ -0,0 +1,1757 @@ +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Utility classes to write to and read from non-blocking files and sockets. + +Contents: + +* `BaseIOStream`: Generic interface for reading and writing. +* `IOStream`: Implementation of BaseIOStream using non-blocking sockets. +* `SSLIOStream`: SSL-aware version of IOStream. +* `PipeIOStream`: Pipe-based IOStream implementation. 
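+
+A typical coroutine-style exchange looks like the following sketch, where
+``sock`` is assumed to be an already-connected, non-blocking socket::
+
+    stream = IOStream(sock)
+    # Read one HTTP-style header block, then answer and hang up.
+    request = await stream.read_until(b"\r\n\r\n")
+    await stream.write(b"HTTP/1.0 200 OK\r\n\r\n")
+    stream.close()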
+""" + +from __future__ import absolute_import, division, print_function + +import collections +import errno +import io +import numbers +import os +import socket +import sys +import re +import warnings + +from tornado.concurrent import Future +from tornado import ioloop +from tornado.log import gen_log, app_log +from tornado.netutil import ssl_wrap_socket, _client_ssl_defaults, _server_ssl_defaults +from tornado import stack_context +from tornado.util import errno_from_exception + +try: + from tornado.platform.posix import _set_nonblocking +except ImportError: + _set_nonblocking = None + +try: + import ssl +except ImportError: + # ssl is not available on Google App Engine + ssl = None + +# These errnos indicate that a non-blocking operation must be retried +# at a later time. On most platforms they're the same value, but on +# some they differ. +_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN) + +if hasattr(errno, "WSAEWOULDBLOCK"): + _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) # type: ignore + +# These errnos indicate that a connection has been abruptly terminated. +# They should be caught and handled less noisily than other errors. +_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE, + errno.ETIMEDOUT) + +if hasattr(errno, "WSAECONNRESET"): + _ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) # type: ignore # noqa: E501 + +if sys.platform == 'darwin': + # OSX appears to have a race condition that causes send(2) to return + # EPROTOTYPE if called while a socket is being torn down: + # http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/ + # Since the socket is being closed anyway, treat this as an ECONNRESET + # instead of an unexpected error. + _ERRNO_CONNRESET += (errno.EPROTOTYPE,) # type: ignore + +# More non-portable errnos: +_ERRNO_INPROGRESS = (errno.EINPROGRESS,) + +if hasattr(errno, "WSAEINPROGRESS"): + _ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,) # type: ignore + +_WINDOWS = sys.platform.startswith('win') + + +class StreamClosedError(IOError): + """Exception raised by `IOStream` methods when the stream is closed. + + Note that the close callback is scheduled to run *after* other + callbacks on the stream (to allow for buffered data to be processed), + so you may see this error before you see the close callback. + + The ``real_error`` attribute contains the underlying error that caused + the stream to close (if any). + + .. versionchanged:: 4.3 + Added the ``real_error`` attribute. + """ + def __init__(self, real_error=None): + super(StreamClosedError, self).__init__('Stream is closed') + self.real_error = real_error + + +class UnsatisfiableReadError(Exception): + """Exception raised when a read cannot be satisfied. + + Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes`` + argument. + """ + pass + + +class StreamBufferFullError(Exception): + """Exception raised by `IOStream` methods when the buffer is full. + """ + + +class _StreamBuffer(object): + """ + A specialized buffer that tries to avoid copies when large pieces + of data are encountered. 
+ """ + + def __init__(self): + # A sequence of (False, bytearray) and (True, memoryview) objects + self._buffers = collections.deque() + # Position in the first buffer + self._first_pos = 0 + self._size = 0 + + def __len__(self): + return self._size + + # Data above this size will be appended separately instead + # of extending an existing bytearray + _large_buf_threshold = 2048 + + def append(self, data): + """ + Append the given piece of data (should be a buffer-compatible object). + """ + size = len(data) + if size > self._large_buf_threshold: + if not isinstance(data, memoryview): + data = memoryview(data) + self._buffers.append((True, data)) + elif size > 0: + if self._buffers: + is_memview, b = self._buffers[-1] + new_buf = is_memview or len(b) >= self._large_buf_threshold + else: + new_buf = True + if new_buf: + self._buffers.append((False, bytearray(data))) + else: + b += data + + self._size += size + + def peek(self, size): + """ + Get a view over at most ``size`` bytes (possibly fewer) at the + current buffer position. + """ + assert size > 0 + try: + is_memview, b = self._buffers[0] + except IndexError: + return memoryview(b'') + + pos = self._first_pos + if is_memview: + return b[pos:pos + size] + else: + return memoryview(b)[pos:pos + size] + + def advance(self, size): + """ + Advance the current buffer position by ``size`` bytes. + """ + assert 0 < size <= self._size + self._size -= size + pos = self._first_pos + + buffers = self._buffers + while buffers and size > 0: + is_large, b = buffers[0] + b_remain = len(b) - size - pos + if b_remain <= 0: + buffers.popleft() + size -= len(b) - pos + pos = 0 + elif is_large: + pos += size + size = 0 + else: + # Amortized O(1) shrink for Python 2 + pos += size + if len(b) <= 2 * pos: + del b[:pos] + pos = 0 + size = 0 + + assert size == 0 + self._first_pos = pos + + +class BaseIOStream(object): + """A utility class to write to and read from a non-blocking file or socket. + + We support a non-blocking ``write()`` and a family of ``read_*()`` methods. + All of the methods take an optional ``callback`` argument and return a + `.Future` only if no callback is given. When the operation completes, + the callback will be run or the `.Future` will resolve with the data + read (or ``None`` for ``write()``). All outstanding ``Futures`` will + resolve with a `StreamClosedError` when the stream is closed; users + of the callback interface will be notified via + `.BaseIOStream.set_close_callback` instead. + + When a stream is closed due to an error, the IOStream's ``error`` + attribute contains the exception object. + + Subclasses must implement `fileno`, `close_fd`, `write_to_fd`, + `read_from_fd`, and optionally `get_fd_error`. + """ + def __init__(self, max_buffer_size=None, + read_chunk_size=None, max_write_buffer_size=None): + """`BaseIOStream` constructor. + + :arg max_buffer_size: Maximum amount of incoming data to buffer; + defaults to 100MB. + :arg read_chunk_size: Amount of data to read at one time from the + underlying transport; defaults to 64KB. + :arg max_write_buffer_size: Amount of outgoing data to buffer; + defaults to unlimited. + + .. versionchanged:: 4.0 + Add the ``max_write_buffer_size`` parameter. Changed default + ``read_chunk_size`` to 64KB. + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been + removed. 
+ """ + self.io_loop = ioloop.IOLoop.current() + self.max_buffer_size = max_buffer_size or 104857600 + # A chunk size that is too close to max_buffer_size can cause + # spurious failures. + self.read_chunk_size = min(read_chunk_size or 65536, + self.max_buffer_size // 2) + self.max_write_buffer_size = max_write_buffer_size + self.error = None + self._read_buffer = bytearray() + self._read_buffer_pos = 0 + self._read_buffer_size = 0 + self._user_read_buffer = False + self._after_user_read_buffer = None + self._write_buffer = _StreamBuffer() + self._total_write_index = 0 + self._total_write_done_index = 0 + self._read_delimiter = None + self._read_regex = None + self._read_max_bytes = None + self._read_bytes = None + self._read_partial = False + self._read_until_close = False + self._read_callback = None + self._read_future = None + self._streaming_callback = None + self._write_callback = None + self._write_futures = collections.deque() + self._close_callback = None + self._connect_callback = None + self._connect_future = None + # _ssl_connect_future should be defined in SSLIOStream + # but it's here so we can clean it up in maybe_run_close_callback. + # TODO: refactor that so subclasses can add additional futures + # to be cancelled. + self._ssl_connect_future = None + self._connecting = False + self._state = None + self._pending_callbacks = 0 + self._closed = False + + def fileno(self): + """Returns the file descriptor for this stream.""" + raise NotImplementedError() + + def close_fd(self): + """Closes the file underlying this stream. + + ``close_fd`` is called by `BaseIOStream` and should not be called + elsewhere; other users should call `close` instead. + """ + raise NotImplementedError() + + def write_to_fd(self, data): + """Attempts to write ``data`` to the underlying file. + + Returns the number of bytes written. + """ + raise NotImplementedError() + + def read_from_fd(self, buf): + """Attempts to read from the underlying file. + + Reads up to ``len(buf)`` bytes, storing them in the buffer. + Returns the number of bytes read. Returns None if there was + nothing to read (the socket returned `~errno.EWOULDBLOCK` or + equivalent), and zero on EOF. + + .. versionchanged:: 5.0 + + Interface redesigned to take a buffer and return a number + of bytes instead of a freshly-allocated object. + """ + raise NotImplementedError() + + def get_fd_error(self): + """Returns information about any error on the underlying file. + + This method is called after the `.IOLoop` has signaled an error on the + file descriptor, and should return an Exception (such as `socket.error` + with additional information, or None if no such information is + available. + """ + return None + + def read_until_regex(self, regex, callback=None, max_bytes=None): + """Asynchronously read until we have matched the given regex. + + The result includes the data that matches the regex and anything + that came before it. If a callback is given, it will be run + with the data as an argument; if not, this method returns a + `.Future`. + + If ``max_bytes`` is not None, the connection will be closed + if more than ``max_bytes`` bytes have been read and the regex is + not satisfied. + + .. versionchanged:: 4.0 + Added the ``max_bytes`` argument. The ``callback`` argument is + now optional and a `.Future` will be returned if it is omitted. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. Use the returned `.Future` instead. 
+ + """ + future = self._set_read_callback(callback) + self._read_regex = re.compile(regex) + self._read_max_bytes = max_bytes + try: + self._try_inline_read() + except UnsatisfiableReadError as e: + # Handle this the same way as in _handle_events. + gen_log.info("Unsatisfiable read, closing connection: %s" % e) + self.close(exc_info=e) + return future + except: + if future is not None: + # Ensure that the future doesn't log an error because its + # failure was never examined. + future.add_done_callback(lambda f: f.exception()) + raise + return future + + def read_until(self, delimiter, callback=None, max_bytes=None): + """Asynchronously read until we have found the given delimiter. + + The result includes all the data read including the delimiter. + If a callback is given, it will be run with the data as an argument; + if not, this method returns a `.Future`. + + If ``max_bytes`` is not None, the connection will be closed + if more than ``max_bytes`` bytes have been read and the delimiter + is not found. + + .. versionchanged:: 4.0 + Added the ``max_bytes`` argument. The ``callback`` argument is + now optional and a `.Future` will be returned if it is omitted. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. Use the returned `.Future` instead. + """ + future = self._set_read_callback(callback) + self._read_delimiter = delimiter + self._read_max_bytes = max_bytes + try: + self._try_inline_read() + except UnsatisfiableReadError as e: + # Handle this the same way as in _handle_events. + gen_log.info("Unsatisfiable read, closing connection: %s" % e) + self.close(exc_info=e) + return future + except: + if future is not None: + future.add_done_callback(lambda f: f.exception()) + raise + return future + + def read_bytes(self, num_bytes, callback=None, streaming_callback=None, + partial=False): + """Asynchronously read a number of bytes. + + If a ``streaming_callback`` is given, it will be called with chunks + of data as they become available, and the final result will be empty. + Otherwise, the result is all the data that was read. + If a callback is given, it will be run with the data as an argument; + if not, this method returns a `.Future`. + + If ``partial`` is true, the callback is run as soon as we have + any bytes to return (but never more than ``num_bytes``) + + .. versionchanged:: 4.0 + Added the ``partial`` argument. The callback argument is now + optional and a `.Future` will be returned if it is omitted. + + .. deprecated:: 5.1 + + The ``callback`` and ``streaming_callback`` arguments are + deprecated and will be removed in Tornado 6.0. Use the + returned `.Future` (and ``partial=True`` for + ``streaming_callback``) instead. + + """ + future = self._set_read_callback(callback) + assert isinstance(num_bytes, numbers.Integral) + self._read_bytes = num_bytes + self._read_partial = partial + if streaming_callback is not None: + warnings.warn("streaming_callback is deprecated, use partial instead", + DeprecationWarning) + self._streaming_callback = stack_context.wrap(streaming_callback) + try: + self._try_inline_read() + except: + if future is not None: + future.add_done_callback(lambda f: f.exception()) + raise + return future + + def read_into(self, buf, callback=None, partial=False): + """Asynchronously read a number of bytes. + + ``buf`` must be a writable buffer into which data will be read. + If a callback is given, it will be run with the number of read + bytes as an argument; if not, this method returns a `.Future`. 
+ + If ``partial`` is true, the callback is run as soon as any bytes + have been read. Otherwise, it is run when the ``buf`` has been + entirely filled with read data. + + .. versionadded:: 5.0 + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. Use the returned `.Future` instead. + + """ + future = self._set_read_callback(callback) + + # First copy data already in read buffer + available_bytes = self._read_buffer_size + n = len(buf) + if available_bytes >= n: + end = self._read_buffer_pos + n + buf[:] = memoryview(self._read_buffer)[self._read_buffer_pos:end] + del self._read_buffer[:end] + self._after_user_read_buffer = self._read_buffer + elif available_bytes > 0: + buf[:available_bytes] = memoryview(self._read_buffer)[self._read_buffer_pos:] + + # Set up the supplied buffer as our temporary read buffer. + # The original (if it had any data remaining) has been + # saved for later. + self._user_read_buffer = True + self._read_buffer = buf + self._read_buffer_pos = 0 + self._read_buffer_size = available_bytes + self._read_bytes = n + self._read_partial = partial + + try: + self._try_inline_read() + except: + if future is not None: + future.add_done_callback(lambda f: f.exception()) + raise + return future + + def read_until_close(self, callback=None, streaming_callback=None): + """Asynchronously reads all data from the socket until it is closed. + + If a ``streaming_callback`` is given, it will be called with chunks + of data as they become available, and the final result will be empty. + Otherwise, the result is all the data that was read. + If a callback is given, it will be run with the data as an argument; + if not, this method returns a `.Future`. + + Note that if a ``streaming_callback`` is used, data will be + read from the socket as quickly as it becomes available; there + is no way to apply backpressure or cancel the reads. If flow + control or cancellation are desired, use a loop with + `read_bytes(partial=True) <.read_bytes>` instead. + + .. versionchanged:: 4.0 + The callback argument is now optional and a `.Future` will + be returned if it is omitted. + + .. deprecated:: 5.1 + + The ``callback`` and ``streaming_callback`` arguments are + deprecated and will be removed in Tornado 6.0. Use the + returned `.Future` (and `read_bytes` with ``partial=True`` + for ``streaming_callback``) instead. + + """ + future = self._set_read_callback(callback) + if streaming_callback is not None: + warnings.warn("streaming_callback is deprecated, use read_bytes(partial=True) instead", + DeprecationWarning) + self._streaming_callback = stack_context.wrap(streaming_callback) + if self.closed(): + if self._streaming_callback is not None: + self._run_read_callback(self._read_buffer_size, True) + self._run_read_callback(self._read_buffer_size, False) + return future + self._read_until_close = True + try: + self._try_inline_read() + except: + if future is not None: + future.add_done_callback(lambda f: f.exception()) + raise + return future + + def write(self, data, callback=None): + """Asynchronously write the given data to this stream. + + If ``callback`` is given, we call it when all of the buffered write + data has been successfully written to the stream. If there was + previously buffered write data and an old write callback, that + callback is simply overwritten with this new callback. + + If no ``callback`` is given, this method returns a `.Future` that + resolves (with a result of ``None``) when the write has been + completed. 
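+
+        For example (a sketch; ``stream`` is a placeholder)::
+
+            await stream.write(b"ping\r\n")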
+ + The ``data`` argument may be of type `bytes` or `memoryview`. + + .. versionchanged:: 4.0 + Now returns a `.Future` if no callback is given. + + .. versionchanged:: 4.5 + Added support for `memoryview` arguments. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. Use the returned `.Future` instead. + + """ + self._check_closed() + if data: + if (self.max_write_buffer_size is not None and + len(self._write_buffer) + len(data) > self.max_write_buffer_size): + raise StreamBufferFullError("Reached maximum write buffer size") + self._write_buffer.append(data) + self._total_write_index += len(data) + if callback is not None: + warnings.warn("callback argument is deprecated, use returned Future instead", + DeprecationWarning) + self._write_callback = stack_context.wrap(callback) + future = None + else: + future = Future() + future.add_done_callback(lambda f: f.exception()) + self._write_futures.append((self._total_write_index, future)) + if not self._connecting: + self._handle_write() + if self._write_buffer: + self._add_io_state(self.io_loop.WRITE) + self._maybe_add_error_listener() + return future + + def set_close_callback(self, callback): + """Call the given callback when the stream is closed. + + This mostly is not necessary for applications that use the + `.Future` interface; all outstanding ``Futures`` will resolve + with a `StreamClosedError` when the stream is closed. However, + it is still useful as a way to signal that the stream has been + closed while no other read or write is in progress. + + Unlike other callback-based interfaces, ``set_close_callback`` + will not be removed in Tornado 6.0. + """ + self._close_callback = stack_context.wrap(callback) + self._maybe_add_error_listener() + + def close(self, exc_info=False): + """Close this stream. + + If ``exc_info`` is true, set the ``error`` attribute to the current + exception from `sys.exc_info` (or if ``exc_info`` is a tuple, + use that instead of `sys.exc_info`). 
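+
+        For example, recording the triggering exception on the stream
+        (a sketch; ``process`` stands in for application code)::
+
+            try:
+                process(stream)
+            except Exception:
+                stream.close(exc_info=True)  # sets stream.error from sys.exc_info
+                raise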
+ """ + if not self.closed(): + if exc_info: + if isinstance(exc_info, tuple): + self.error = exc_info[1] + elif isinstance(exc_info, BaseException): + self.error = exc_info + else: + exc_info = sys.exc_info() + if any(exc_info): + self.error = exc_info[1] + if self._read_until_close: + if (self._streaming_callback is not None and + self._read_buffer_size): + self._run_read_callback(self._read_buffer_size, True) + self._read_until_close = False + self._run_read_callback(self._read_buffer_size, False) + if self._state is not None: + self.io_loop.remove_handler(self.fileno()) + self._state = None + self.close_fd() + self._closed = True + self._maybe_run_close_callback() + + def _maybe_run_close_callback(self): + # If there are pending callbacks, don't run the close callback + # until they're done (see _maybe_add_error_handler) + if self.closed() and self._pending_callbacks == 0: + futures = [] + if self._read_future is not None: + futures.append(self._read_future) + self._read_future = None + futures += [future for _, future in self._write_futures] + self._write_futures.clear() + if self._connect_future is not None: + futures.append(self._connect_future) + self._connect_future = None + if self._ssl_connect_future is not None: + futures.append(self._ssl_connect_future) + self._ssl_connect_future = None + for future in futures: + future.set_exception(StreamClosedError(real_error=self.error)) + future.exception() + if self._close_callback is not None: + cb = self._close_callback + self._close_callback = None + self._run_callback(cb) + # Delete any unfinished callbacks to break up reference cycles. + self._read_callback = self._write_callback = None + # Clear the buffers so they can be cleared immediately even + # if the IOStream object is kept alive by a reference cycle. + # TODO: Clear the read buffer too; it currently breaks some tests. + self._write_buffer = None + + def reading(self): + """Returns true if we are currently reading from the stream.""" + return self._read_callback is not None or self._read_future is not None + + def writing(self): + """Returns true if we are currently writing to the stream.""" + return bool(self._write_buffer) + + def closed(self): + """Returns true if the stream has been closed.""" + return self._closed + + def set_nodelay(self, value): + """Sets the no-delay flag for this stream. + + By default, data written to TCP streams may be held for a time + to make the most efficient use of bandwidth (according to + Nagle's algorithm). The no-delay flag requests that data be + written as soon as possible, even if doing so would consume + additional bandwidth. + + This flag is currently defined only for TCP-based ``IOStreams``. + + .. versionadded:: 3.1 + """ + pass + + def _handle_events(self, fd, events): + if self.closed(): + gen_log.warning("Got events for closed stream %s", fd) + return + try: + if self._connecting: + # Most IOLoops will report a write failed connect + # with the WRITE event, but SelectIOLoop reports a + # READ as well so we must check for connecting before + # either. + self._handle_connect() + if self.closed(): + return + if events & self.io_loop.READ: + self._handle_read() + if self.closed(): + return + if events & self.io_loop.WRITE: + self._handle_write() + if self.closed(): + return + if events & self.io_loop.ERROR: + self.error = self.get_fd_error() + # We may have queued up a user callback in _handle_read or + # _handle_write, so don't close the IOStream until those + # callbacks have had a chance to run. 
+ self.io_loop.add_callback(self.close) + return + state = self.io_loop.ERROR + if self.reading(): + state |= self.io_loop.READ + if self.writing(): + state |= self.io_loop.WRITE + if state == self.io_loop.ERROR and self._read_buffer_size == 0: + # If the connection is idle, listen for reads too so + # we can tell if the connection is closed. If there is + # data in the read buffer we won't run the close callback + # yet anyway, so we don't need to listen in this case. + state |= self.io_loop.READ + if state != self._state: + assert self._state is not None, \ + "shouldn't happen: _handle_events without self._state" + self._state = state + self.io_loop.update_handler(self.fileno(), self._state) + except UnsatisfiableReadError as e: + gen_log.info("Unsatisfiable read, closing connection: %s" % e) + self.close(exc_info=e) + except Exception as e: + gen_log.error("Uncaught exception, closing connection.", + exc_info=True) + self.close(exc_info=e) + raise + + def _run_callback(self, callback, *args): + def wrapper(): + self._pending_callbacks -= 1 + try: + return callback(*args) + except Exception as e: + app_log.error("Uncaught exception, closing connection.", + exc_info=True) + # Close the socket on an uncaught exception from a user callback + # (It would eventually get closed when the socket object is + # gc'd, but we don't want to rely on gc happening before we + # run out of file descriptors) + self.close(exc_info=e) + # Re-raise the exception so that IOLoop.handle_callback_exception + # can see it and log the error + raise + finally: + self._maybe_add_error_listener() + # We schedule callbacks to be run on the next IOLoop iteration + # rather than running them directly for several reasons: + # * Prevents unbounded stack growth when a callback calls an + # IOLoop operation that immediately runs another callback + # * Provides a predictable execution context for e.g. + # non-reentrant mutexes + # * Ensures that the try/except in wrapper() is run outside + # of the application's StackContexts + with stack_context.NullContext(): + # stack_context was already captured in callback, we don't need to + # capture it again for IOStream's wrapper. This is especially + # important if the callback was pre-wrapped before entry to + # IOStream (as in HTTPConnection._header_callback), as we could + # capture and leak the wrong context here. + self._pending_callbacks += 1 + self.io_loop.add_callback(wrapper) + + def _read_to_buffer_loop(self): + # This method is called from _handle_read and _try_inline_read. + try: + if self._read_bytes is not None: + target_bytes = self._read_bytes + elif self._read_max_bytes is not None: + target_bytes = self._read_max_bytes + elif self.reading(): + # For read_until without max_bytes, or + # read_until_close, read as much as we can before + # scanning for the delimiter. + target_bytes = None + else: + target_bytes = 0 + next_find_pos = 0 + # Pretend to have a pending callback so that an EOF in + # _read_to_buffer doesn't trigger an immediate close + # callback. At the end of this method we'll either + # establish a real pending callback via + # _read_from_buffer or run the close callback. + # + # We need two try statements here so that + # pending_callbacks is decremented before the `except` + # clause below (which calls `close` and does need to + # trigger the callback) + self._pending_callbacks += 1 + while not self.closed(): + # Read from the socket until we get EWOULDBLOCK or equivalent. 
+                # SSL sockets do some internal buffering, and if the data is
+                # sitting in the SSL object's buffer select() and friends
+                # can't see it; the only way to find out if it's there is to
+                # try to read it.
+                if self._read_to_buffer() == 0:
+                    break
+
+                self._run_streaming_callback()
+
+                # If we've read all the bytes we can use, break out of
+                # this loop.  We can't just call read_from_buffer here
+                # because of subtle interactions with the
+                # pending_callback and error_listener mechanisms.
+                #
+                # If we've reached target_bytes, we know we're done.
+                if (target_bytes is not None and
+                        self._read_buffer_size >= target_bytes):
+                    break
+
+                # Otherwise, we need to call the more expensive find_read_pos.
+                # It's inefficient to do this on every read, so instead
+                # do it on the first read and whenever the read buffer
+                # size has doubled.
+                if self._read_buffer_size >= next_find_pos:
+                    pos = self._find_read_pos()
+                    if pos is not None:
+                        return pos
+                    next_find_pos = self._read_buffer_size * 2
+            return self._find_read_pos()
+        finally:
+            self._pending_callbacks -= 1
+
+    def _handle_read(self):
+        try:
+            pos = self._read_to_buffer_loop()
+        except UnsatisfiableReadError:
+            raise
+        except Exception as e:
+            gen_log.warning("error on read: %s" % e)
+            self.close(exc_info=e)
+            return
+        if pos is not None:
+            self._read_from_buffer(pos)
+            return
+        else:
+            self._maybe_run_close_callback()
+
+    def _set_read_callback(self, callback):
+        assert self._read_callback is None, "Already reading"
+        assert self._read_future is None, "Already reading"
+        if callback is not None:
+            warnings.warn("callbacks are deprecated, use returned Future instead",
+                          DeprecationWarning)
+            self._read_callback = stack_context.wrap(callback)
+        else:
+            self._read_future = Future()
+        return self._read_future
+
+    def _run_read_callback(self, size, streaming):
+        if self._user_read_buffer:
+            self._read_buffer = self._after_user_read_buffer or bytearray()
+            self._after_user_read_buffer = None
+            self._read_buffer_pos = 0
+            self._read_buffer_size = len(self._read_buffer)
+            self._user_read_buffer = False
+            result = size
+        else:
+            result = self._consume(size)
+        if streaming:
+            callback = self._streaming_callback
+        else:
+            callback = self._read_callback
+            self._read_callback = self._streaming_callback = None
+            if self._read_future is not None:
+                assert callback is None
+                future = self._read_future
+                self._read_future = None
+
+                future.set_result(result)
+        if callback is not None:
+            assert (self._read_future is None) or streaming
+            self._run_callback(callback, result)
+        else:
+            # If we scheduled a callback, we will add the error listener
+            # afterwards.  If we didn't, we have to do it now.
+            self._maybe_add_error_listener()
+
+    def _try_inline_read(self):
+        """Attempt to complete the current read operation from buffered data.
+
+        If the read can be completed without blocking, schedules the
+        read callback on the next IOLoop iteration; otherwise starts
+        listening for reads on the socket.
+        """
+        # See if we've already got the data from a previous read
+        self._run_streaming_callback()
+        pos = self._find_read_pos()
+        if pos is not None:
+            self._read_from_buffer(pos)
+            return
+        self._check_closed()
+        try:
+            pos = self._read_to_buffer_loop()
+        except Exception:
+            # If there was an error in _read_to_buffer, we called close()
+            # already, but couldn't run the close callback because of
+            # _pending_callbacks. Before we escape from this function, run
+            # the close callback if applicable.
+ self._maybe_run_close_callback() + raise + if pos is not None: + self._read_from_buffer(pos) + return + # We couldn't satisfy the read inline, so either close the stream + # or listen for new data. + if self.closed(): + self._maybe_run_close_callback() + else: + self._add_io_state(ioloop.IOLoop.READ) + + def _read_to_buffer(self): + """Reads from the socket and appends the result to the read buffer. + + Returns the number of bytes read. Returns 0 if there is nothing + to read (i.e. the read returns EWOULDBLOCK or equivalent). On + error closes the socket and raises an exception. + """ + try: + while True: + try: + if self._user_read_buffer: + buf = memoryview(self._read_buffer)[self._read_buffer_size:] + else: + buf = bytearray(self.read_chunk_size) + bytes_read = self.read_from_fd(buf) + except (socket.error, IOError, OSError) as e: + if errno_from_exception(e) == errno.EINTR: + continue + # ssl.SSLError is a subclass of socket.error + if self._is_connreset(e): + # Treat ECONNRESET as a connection close rather than + # an error to minimize log spam (the exception will + # be available on self.error for apps that care). + self.close(exc_info=e) + return + self.close(exc_info=e) + raise + break + if bytes_read is None: + return 0 + elif bytes_read == 0: + self.close() + return 0 + if not self._user_read_buffer: + self._read_buffer += memoryview(buf)[:bytes_read] + self._read_buffer_size += bytes_read + finally: + # Break the reference to buf so we don't waste a chunk's worth of + # memory in case an exception hangs on to our stack frame. + buf = None + if self._read_buffer_size > self.max_buffer_size: + gen_log.error("Reached maximum read buffer size") + self.close() + raise StreamBufferFullError("Reached maximum read buffer size") + return bytes_read + + def _run_streaming_callback(self): + if self._streaming_callback is not None and self._read_buffer_size: + bytes_to_consume = self._read_buffer_size + if self._read_bytes is not None: + bytes_to_consume = min(self._read_bytes, bytes_to_consume) + self._read_bytes -= bytes_to_consume + self._run_read_callback(bytes_to_consume, True) + + def _read_from_buffer(self, pos): + """Attempts to complete the currently-pending read from the buffer. + + The argument is either a position in the read buffer or None, + as returned by _find_read_pos. + """ + self._read_bytes = self._read_delimiter = self._read_regex = None + self._read_partial = False + self._run_read_callback(pos, False) + + def _find_read_pos(self): + """Attempts to find a position in the read buffer that satisfies + the currently-pending read. + + Returns a position in the buffer if the current read can be satisfied, + or None if it cannot. + """ + if (self._read_bytes is not None and + (self._read_buffer_size >= self._read_bytes or + (self._read_partial and self._read_buffer_size > 0))): + num_bytes = min(self._read_bytes, self._read_buffer_size) + return num_bytes + elif self._read_delimiter is not None: + # Multi-byte delimiters (e.g. '\r\n') may straddle two + # chunks in the read buffer, so we can't easily find them + # without collapsing the buffer. However, since protocols + # using delimited reads (as opposed to reads of a known + # length) tend to be "line" oriented, the delimiter is likely + # to be in the first few chunks. Merge the buffer gradually + # since large merges are relatively expensive and get undone in + # _consume(). 
+ if self._read_buffer: + loc = self._read_buffer.find(self._read_delimiter, + self._read_buffer_pos) + if loc != -1: + loc -= self._read_buffer_pos + delimiter_len = len(self._read_delimiter) + self._check_max_bytes(self._read_delimiter, + loc + delimiter_len) + return loc + delimiter_len + self._check_max_bytes(self._read_delimiter, + self._read_buffer_size) + elif self._read_regex is not None: + if self._read_buffer: + m = self._read_regex.search(self._read_buffer, + self._read_buffer_pos) + if m is not None: + loc = m.end() - self._read_buffer_pos + self._check_max_bytes(self._read_regex, loc) + return loc + self._check_max_bytes(self._read_regex, self._read_buffer_size) + return None + + def _check_max_bytes(self, delimiter, size): + if (self._read_max_bytes is not None and + size > self._read_max_bytes): + raise UnsatisfiableReadError( + "delimiter %r not found within %d bytes" % ( + delimiter, self._read_max_bytes)) + + def _handle_write(self): + while True: + size = len(self._write_buffer) + if not size: + break + assert size > 0 + try: + if _WINDOWS: + # On windows, socket.send blows up if given a + # write buffer that's too large, instead of just + # returning the number of bytes it was able to + # process. Therefore we must not call socket.send + # with more than 128KB at a time. + size = 128 * 1024 + + num_bytes = self.write_to_fd(self._write_buffer.peek(size)) + if num_bytes == 0: + break + self._write_buffer.advance(num_bytes) + self._total_write_done_index += num_bytes + except (socket.error, IOError, OSError) as e: + if e.args[0] in _ERRNO_WOULDBLOCK: + break + else: + if not self._is_connreset(e): + # Broken pipe errors are usually caused by connection + # reset, and its better to not log EPIPE errors to + # minimize log spam + gen_log.warning("Write error on %s: %s", + self.fileno(), e) + self.close(exc_info=e) + return + + while self._write_futures: + index, future = self._write_futures[0] + if index > self._total_write_done_index: + break + self._write_futures.popleft() + future.set_result(None) + + if not len(self._write_buffer): + if self._write_callback: + callback = self._write_callback + self._write_callback = None + self._run_callback(callback) + + def _consume(self, loc): + # Consume loc bytes from the read buffer and return them + if loc == 0: + return b"" + assert loc <= self._read_buffer_size + # Slice the bytearray buffer into bytes, without intermediate copying + b = (memoryview(self._read_buffer) + [self._read_buffer_pos:self._read_buffer_pos + loc] + ).tobytes() + self._read_buffer_pos += loc + self._read_buffer_size -= loc + # Amortized O(1) shrink + # (this heuristic is implemented natively in Python 3.4+ + # but is replicated here for Python 2) + if self._read_buffer_pos > self._read_buffer_size: + del self._read_buffer[:self._read_buffer_pos] + self._read_buffer_pos = 0 + return b + + def _check_closed(self): + if self.closed(): + raise StreamClosedError(real_error=self.error) + + def _maybe_add_error_listener(self): + # This method is part of an optimization: to detect a connection that + # is closed when we're not actively reading or writing, we must listen + # for read events. However, it is inefficient to do this when the + # connection is first established because we are going to read or write + # immediately anyway. Instead, we insert checks at various times to + # see if the connection is idle and add the read listener then. 
+ if self._pending_callbacks != 0: + return + if self._state is None or self._state == ioloop.IOLoop.ERROR: + if self.closed(): + self._maybe_run_close_callback() + elif (self._read_buffer_size == 0 and + self._close_callback is not None): + self._add_io_state(ioloop.IOLoop.READ) + + def _add_io_state(self, state): + """Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler. + + Implementation notes: Reads and writes have a fast path and a + slow path. The fast path reads synchronously from socket + buffers, while the slow path uses `_add_io_state` to schedule + an IOLoop callback. Note that in both cases, the callback is + run asynchronously with `_run_callback`. + + To detect closed connections, we must have called + `_add_io_state` at some point, but we want to delay this as + much as possible so we don't have to set an `IOLoop.ERROR` + listener that will be overwritten by the next slow-path + operation. As long as there are callbacks scheduled for + fast-path ops, those callbacks may do more reads. + If a sequence of fast-path ops do not end in a slow-path op, + (e.g. for an @asynchronous long-poll request), we must add + the error handler. This is done in `_run_callback` and `write` + (since the write callback is optional so we can have a + fast-path write with no `_run_callback`) + """ + if self.closed(): + # connection has been closed, so there can be no future events + return + if self._state is None: + self._state = ioloop.IOLoop.ERROR | state + with stack_context.NullContext(): + self.io_loop.add_handler( + self.fileno(), self._handle_events, self._state) + elif not self._state & state: + self._state = self._state | state + self.io_loop.update_handler(self.fileno(), self._state) + + def _is_connreset(self, exc): + """Return true if exc is ECONNRESET or equivalent. + + May be overridden in subclasses. + """ + return (isinstance(exc, (socket.error, IOError)) and + errno_from_exception(exc) in _ERRNO_CONNRESET) + + +class IOStream(BaseIOStream): + r"""Socket-based `IOStream` implementation. + + This class supports the read and write methods from `BaseIOStream` + plus a `connect` method. + + The ``socket`` parameter may either be connected or unconnected. + For server operations the socket is the result of calling + `socket.accept `. For client operations the + socket is created with `socket.socket`, and may either be + connected before passing it to the `IOStream` or connected with + `IOStream.connect`. + + A very simple (and broken) HTTP client using this class: + + .. testcode:: + + import tornado.ioloop + import tornado.iostream + import socket + + async def main(): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) + stream = tornado.iostream.IOStream(s) + await stream.connect(("friendfeed.com", 80)) + await stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n") + header_data = await stream.read_until(b"\r\n\r\n") + headers = {} + for line in header_data.split(b"\r\n"): + parts = line.split(b":") + if len(parts) == 2: + headers[parts[0].strip()] = parts[1].strip() + body_data = await stream.read_bytes(int(headers[b"Content-Length"])) + print(body_data) + stream.close() + + if __name__ == '__main__': + tornado.ioloop.IOLoop.current().run_sync(main) + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) + stream = tornado.iostream.IOStream(s) + stream.connect(("friendfeed.com", 80), send_request) + tornado.ioloop.IOLoop.current().start() + + .. 
testoutput:: + :hide: + + """ + def __init__(self, socket, *args, **kwargs): + self.socket = socket + self.socket.setblocking(False) + super(IOStream, self).__init__(*args, **kwargs) + + def fileno(self): + return self.socket + + def close_fd(self): + self.socket.close() + self.socket = None + + def get_fd_error(self): + errno = self.socket.getsockopt(socket.SOL_SOCKET, + socket.SO_ERROR) + return socket.error(errno, os.strerror(errno)) + + def read_from_fd(self, buf): + try: + return self.socket.recv_into(buf) + except socket.error as e: + if e.args[0] in _ERRNO_WOULDBLOCK: + return None + else: + raise + finally: + buf = None + + def write_to_fd(self, data): + try: + return self.socket.send(data) + finally: + # Avoid keeping to data, which can be a memoryview. + # See https://github.com/tornadoweb/tornado/pull/2008 + del data + + def connect(self, address, callback=None, server_hostname=None): + """Connects the socket to a remote address without blocking. + + May only be called if the socket passed to the constructor was + not previously connected. The address parameter is in the + same format as for `socket.connect ` for + the type of socket passed to the IOStream constructor, + e.g. an ``(ip, port)`` tuple. Hostnames are accepted here, + but will be resolved synchronously and block the IOLoop. + If you have a hostname instead of an IP address, the `.TCPClient` + class is recommended instead of calling this method directly. + `.TCPClient` will do asynchronous DNS resolution and handle + both IPv4 and IPv6. + + If ``callback`` is specified, it will be called with no + arguments when the connection is completed; if not this method + returns a `.Future` (whose result after a successful + connection will be the stream itself). + + In SSL mode, the ``server_hostname`` parameter will be used + for certificate validation (unless disabled in the + ``ssl_options``) and SNI (if supported; requires Python + 2.7.9+). + + Note that it is safe to call `IOStream.write + ` while the connection is pending, in + which case the data will be written as soon as the connection + is ready. Calling `IOStream` read methods before the socket is + connected works on some platforms but is non-portable. + + .. versionchanged:: 4.0 + If no callback is given, returns a `.Future`. + + .. versionchanged:: 4.2 + SSL certificates are validated by default; pass + ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a + suitably-configured `ssl.SSLContext` to the + `SSLIOStream` constructor to disable. + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. Use the returned `.Future` instead. + + """ + self._connecting = True + if callback is not None: + warnings.warn("callback argument is deprecated, use returned Future instead", + DeprecationWarning) + self._connect_callback = stack_context.wrap(callback) + future = None + else: + future = self._connect_future = Future() + try: + self.socket.connect(address) + except socket.error as e: + # In non-blocking mode we expect connect() to raise an + # exception with EINPROGRESS or EWOULDBLOCK. + # + # On freebsd, other errors such as ECONNREFUSED may be + # returned immediately when attempting to connect to + # localhost, so handle them the same way as an error + # reported later in _handle_connect. 
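+ # For reference, a sketch (not tornado code) of the same non-blocking
+ # connect pattern with the plain socket module; the address is
+ # hypothetical:
+ #
+ #     sock.setblocking(False)
+ #     try:
+ #         sock.connect(("127.0.0.1", 8888))
+ #     except (BlockingIOError, InterruptedError):
+ #         pass  # EINPROGRESS/EINTR: wait for writability, then
+ #               # check SO_ERROR, as _handle_connect() does below.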
+ if (errno_from_exception(e) not in _ERRNO_INPROGRESS and + errno_from_exception(e) not in _ERRNO_WOULDBLOCK): + if future is None: + gen_log.warning("Connect error on fd %s: %s", + self.socket.fileno(), e) + self.close(exc_info=e) + return future + self._add_io_state(self.io_loop.WRITE) + return future + + def start_tls(self, server_side, ssl_options=None, server_hostname=None): + """Convert this `IOStream` to an `SSLIOStream`. + + This enables protocols that begin in clear-text mode and + switch to SSL after some initial negotiation (such as the + ``STARTTLS`` extension to SMTP and IMAP). + + This method cannot be used if there are outstanding reads + or writes on the stream, or if there is any data in the + IOStream's buffer (data in the operating system's socket + buffer is allowed). This means it must generally be used + immediately after reading or writing the last clear-text + data. It can also be used immediately after connecting, + before any reads or writes. + + The ``ssl_options`` argument may be either an `ssl.SSLContext` + object or a dictionary of keyword arguments for the + `ssl.wrap_socket` function. The ``server_hostname`` argument + will be used for certificate validation unless disabled + in the ``ssl_options``. + + This method returns a `.Future` whose result is the new + `SSLIOStream`. After this method has been called, + any other operation on the original stream is undefined. + + If a close callback is defined on this stream, it will be + transferred to the new stream. + + .. versionadded:: 4.0 + + .. versionchanged:: 4.2 + SSL certificates are validated by default; pass + ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a + suitably-configured `ssl.SSLContext` to disable. + """ + if (self._read_callback or self._read_future or + self._write_callback or self._write_futures or + self._connect_callback or self._connect_future or + self._pending_callbacks or self._closed or + self._read_buffer or self._write_buffer): + raise ValueError("IOStream is not idle; cannot convert to SSL") + if ssl_options is None: + if server_side: + ssl_options = _server_ssl_defaults + else: + ssl_options = _client_ssl_defaults + + socket = self.socket + self.io_loop.remove_handler(socket) + self.socket = None + socket = ssl_wrap_socket(socket, ssl_options, + server_hostname=server_hostname, + server_side=server_side, + do_handshake_on_connect=False) + orig_close_callback = self._close_callback + self._close_callback = None + + future = Future() + ssl_stream = SSLIOStream(socket, ssl_options=ssl_options) + # Wrap the original close callback so we can fail our Future as well. + # If we had an "unwrap" counterpart to this method we would need + # to restore the original callback after our Future resolves + # so that repeated wrap/unwrap calls don't build up layers. + + def close_callback(): + if not future.done(): + # Note that unlike most Futures returned by IOStream, + # this one passes the underlying error through directly + # instead of wrapping everything in a StreamClosedError + # with a real_error attribute. This is because once the + # connection is established it's more helpful to raise + # the SSLError directly than to hide it behind a + # StreamClosedError (and the client is expecting SSL + # issues rather than network issues since this method is + # named start_tls). 
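+ # For context, a sketch (not tornado code) of how start_tls is
+ # typically driven; a close on this path surfaces as the exception
+ # set on that awaited Future:
+ #
+ #     stream = IOStream(sock)
+ #     await stream.write(b"STARTTLS\r\n")
+ #     await stream.read_until(b"OK\r\n")
+ #     stream = await stream.start_tls(
+ #         server_side=False, server_hostname="example.com")
+ #
+ # After the Future resolves, only the new SSLIOStream may be used.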
+ future.set_exception(ssl_stream.error or StreamClosedError()) + if orig_close_callback is not None: + orig_close_callback() + ssl_stream.set_close_callback(close_callback) + ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream) + ssl_stream.max_buffer_size = self.max_buffer_size + ssl_stream.read_chunk_size = self.read_chunk_size + return future + + def _handle_connect(self): + try: + err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + except socket.error as e: + # Hurd doesn't allow SO_ERROR for loopback sockets because all + # errors for such sockets are reported synchronously. + if errno_from_exception(e) == errno.ENOPROTOOPT: + err = 0 + if err != 0: + self.error = socket.error(err, os.strerror(err)) + # IOLoop implementations may vary: some of them return + # an error state before the socket becomes writable, so + # in that case a connection failure would be handled by the + # error path in _handle_events instead of here. + if self._connect_future is None: + gen_log.warning("Connect error on fd %s: %s", + self.socket.fileno(), errno.errorcode[err]) + self.close() + return + if self._connect_callback is not None: + callback = self._connect_callback + self._connect_callback = None + self._run_callback(callback) + if self._connect_future is not None: + future = self._connect_future + self._connect_future = None + future.set_result(self) + self._connecting = False + + def set_nodelay(self, value): + if (self.socket is not None and + self.socket.family in (socket.AF_INET, socket.AF_INET6)): + try: + self.socket.setsockopt(socket.IPPROTO_TCP, + socket.TCP_NODELAY, 1 if value else 0) + except socket.error as e: + # Sometimes setsockopt will fail if the socket is closed + # at the wrong time. This can happen with HTTPServer + # resetting the value to false between requests. + if e.errno != errno.EINVAL and not self._is_connreset(e): + raise + + +class SSLIOStream(IOStream): + """A utility class to write to and read from a non-blocking SSL socket. + + If the socket passed to the constructor is already connected, + it should be wrapped with:: + + ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs) + + before constructing the `SSLIOStream`. Unconnected sockets will be + wrapped when `IOStream.connect` is finished. + """ + def __init__(self, *args, **kwargs): + """The ``ssl_options`` keyword argument may either be an + `ssl.SSLContext` object or a dictionary of keywords arguments + for `ssl.wrap_socket` + """ + self._ssl_options = kwargs.pop('ssl_options', _client_ssl_defaults) + super(SSLIOStream, self).__init__(*args, **kwargs) + self._ssl_accepting = True + self._handshake_reading = False + self._handshake_writing = False + self._ssl_connect_callback = None + self._server_hostname = None + + # If the socket is already connected, attempt to start the handshake. + try: + self.socket.getpeername() + except socket.error: + pass + else: + # Indirectly start the handshake, which will run on the next + # IOLoop iteration and then the real IO state will be set in + # _handle_events. 
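+ # Sketch (not tornado code): a pre-connected server-side socket
+ # typically reaches this branch after being wrapped as the class
+ # docstring above describes, e.g.
+ #
+ #     ssl_sock = ssl.wrap_socket(sock, server_side=True,
+ #                                do_handshake_on_connect=False)
+ #     stream = SSLIOStream(ssl_sock)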
+ self._add_io_state(self.io_loop.WRITE) + + def reading(self): + return self._handshake_reading or super(SSLIOStream, self).reading() + + def writing(self): + return self._handshake_writing or super(SSLIOStream, self).writing() + + def _do_ssl_handshake(self): + # Based on code from test_ssl.py in the python stdlib + try: + self._handshake_reading = False + self._handshake_writing = False + self.socket.do_handshake() + except ssl.SSLError as err: + if err.args[0] == ssl.SSL_ERROR_WANT_READ: + self._handshake_reading = True + return + elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE: + self._handshake_writing = True + return + elif err.args[0] in (ssl.SSL_ERROR_EOF, + ssl.SSL_ERROR_ZERO_RETURN): + return self.close(exc_info=err) + elif err.args[0] == ssl.SSL_ERROR_SSL: + try: + peer = self.socket.getpeername() + except Exception: + peer = '(not connected)' + gen_log.warning("SSL Error on %s %s: %s", + self.socket.fileno(), peer, err) + return self.close(exc_info=err) + raise + except socket.error as err: + # Some port scans (e.g. nmap in -sT mode) have been known + # to cause do_handshake to raise EBADF and ENOTCONN, so make + # those errors quiet as well. + # https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0 + if (self._is_connreset(err) or + err.args[0] in (errno.EBADF, errno.ENOTCONN)): + return self.close(exc_info=err) + raise + except AttributeError as err: + # On Linux, if the connection was reset before the call to + # wrap_socket, do_handshake will fail with an + # AttributeError. + return self.close(exc_info=err) + else: + self._ssl_accepting = False + if not self._verify_cert(self.socket.getpeercert()): + self.close() + return + self._run_ssl_connect_callback() + + def _run_ssl_connect_callback(self): + if self._ssl_connect_callback is not None: + callback = self._ssl_connect_callback + self._ssl_connect_callback = None + self._run_callback(callback) + if self._ssl_connect_future is not None: + future = self._ssl_connect_future + self._ssl_connect_future = None + future.set_result(self) + + def _verify_cert(self, peercert): + """Returns True if peercert is valid according to the configured + validation mode and hostname. + + The ssl handshake already tested the certificate for a valid + CA signature; the only thing that remains is to check + the hostname. + """ + if isinstance(self._ssl_options, dict): + verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE) + elif isinstance(self._ssl_options, ssl.SSLContext): + verify_mode = self._ssl_options.verify_mode + assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL) + if verify_mode == ssl.CERT_NONE or self._server_hostname is None: + return True + cert = self.socket.getpeercert() + if cert is None and verify_mode == ssl.CERT_REQUIRED: + gen_log.warning("No SSL certificate given") + return False + try: + ssl.match_hostname(peercert, self._server_hostname) + except ssl.CertificateError as e: + gen_log.warning("Invalid SSL certificate: %s" % e) + return False + else: + return True + + def _handle_read(self): + if self._ssl_accepting: + self._do_ssl_handshake() + return + super(SSLIOStream, self)._handle_read() + + def _handle_write(self): + if self._ssl_accepting: + self._do_ssl_handshake() + return + super(SSLIOStream, self)._handle_write() + + def connect(self, address, callback=None, server_hostname=None): + self._server_hostname = server_hostname + # Ignore the result of connect(). If it fails, + # wait_for_handshake will raise an error too. 
This is + # necessary for the old semantics of the connect callback + # (which takes no arguments). In 6.0 this can be refactored to + # be a regular coroutine. + fut = super(SSLIOStream, self).connect(address) + fut.add_done_callback(lambda f: f.exception()) + return self.wait_for_handshake(callback) + + def _handle_connect(self): + # Call the superclass method to check for errors. + super(SSLIOStream, self)._handle_connect() + if self.closed(): + return + # When the connection is complete, wrap the socket for SSL + # traffic. Note that we do this by overriding _handle_connect + # instead of by passing a callback to super().connect because + # user callbacks are enqueued asynchronously on the IOLoop, + # but since _handle_events calls _handle_connect immediately + # followed by _handle_write we need this to be synchronous. + # + # The IOLoop will get confused if we swap out self.socket while the + # fd is registered, so remove it now and re-register after + # wrap_socket(). + self.io_loop.remove_handler(self.socket) + old_state = self._state + self._state = None + self.socket = ssl_wrap_socket(self.socket, self._ssl_options, + server_hostname=self._server_hostname, + do_handshake_on_connect=False) + self._add_io_state(old_state) + + def wait_for_handshake(self, callback=None): + """Wait for the initial SSL handshake to complete. + + If a ``callback`` is given, it will be called with no + arguments once the handshake is complete; otherwise this + method returns a `.Future` which will resolve to the + stream itself after the handshake is complete. + + Once the handshake is complete, information such as + the peer's certificate and NPN/ALPN selections may be + accessed on ``self.socket``. + + This method is intended for use on server-side streams + or after using `IOStream.start_tls`; it should not be used + with `IOStream.connect` (which already waits for the + handshake to complete). It may only be called once per stream. + + .. versionadded:: 4.2 + + .. deprecated:: 5.1 + + The ``callback`` argument is deprecated and will be removed + in Tornado 6.0. Use the returned `.Future` instead. + + """ + if (self._ssl_connect_callback is not None or + self._ssl_connect_future is not None): + raise RuntimeError("Already waiting") + if callback is not None: + warnings.warn("callback argument is deprecated, use returned Future instead", + DeprecationWarning) + self._ssl_connect_callback = stack_context.wrap(callback) + future = None + else: + future = self._ssl_connect_future = Future() + if not self._ssl_accepting: + self._run_ssl_connect_callback() + return future + + def write_to_fd(self, data): + try: + return self.socket.send(data) + except ssl.SSLError as e: + if e.args[0] == ssl.SSL_ERROR_WANT_WRITE: + # In Python 3.5+, SSLSocket.send raises a WANT_WRITE error if + # the socket is not writeable; we need to transform this into + # an EWOULDBLOCK socket.error or a zero return value, + # either of which will be recognized by the caller of this + # method. Prior to Python 3.5, an unwriteable socket would + # simply return 0 bytes written. + return 0 + raise + finally: + # Avoid keeping to data, which can be a memoryview. 
+ # See https://github.com/tornadoweb/tornado/pull/2008 + del data + + def read_from_fd(self, buf): + try: + if self._ssl_accepting: + # If the handshake hasn't finished yet, there can't be anything + # to read (attempting to read may or may not raise an exception + # depending on the SSL version) + return None + try: + return self.socket.recv_into(buf) + except ssl.SSLError as e: + # SSLError is a subclass of socket.error, so this except + # block must come first. + if e.args[0] == ssl.SSL_ERROR_WANT_READ: + return None + else: + raise + except socket.error as e: + if e.args[0] in _ERRNO_WOULDBLOCK: + return None + else: + raise + finally: + buf = None + + def _is_connreset(self, e): + if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF: + return True + return super(SSLIOStream, self)._is_connreset(e) + + +class PipeIOStream(BaseIOStream): + """Pipe-based `IOStream` implementation. + + The constructor takes an integer file descriptor (such as one returned + by `os.pipe`) rather than an open file object. Pipes are generally + one-way, so a `PipeIOStream` can be used for reading or writing but not + both. + """ + def __init__(self, fd, *args, **kwargs): + self.fd = fd + self._fio = io.FileIO(self.fd, "r+") + _set_nonblocking(fd) + super(PipeIOStream, self).__init__(*args, **kwargs) + + def fileno(self): + return self.fd + + def close_fd(self): + self._fio.close() + + def write_to_fd(self, data): + try: + return os.write(self.fd, data) + finally: + # Avoid keeping to data, which can be a memoryview. + # See https://github.com/tornadoweb/tornado/pull/2008 + del data + + def read_from_fd(self, buf): + try: + return self._fio.readinto(buf) + except (IOError, OSError) as e: + if errno_from_exception(e) == errno.EBADF: + # If the writing half of a pipe is closed, select will + # report it as readable but reads will fail with EBADF. + self.close(exc_info=e) + return None + else: + raise + finally: + buf = None + + +def doctests(): + import doctest + return doctest.DocTestSuite() diff --git a/server/www/packages/packages-windows/x86/tornado/locale.py b/server/www/packages/packages-windows/x86/tornado/locale.py new file mode 100644 index 0000000..d45172f --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/locale.py @@ -0,0 +1,521 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Translation methods for generating localized strings. + +To load a locale and generate a translated string:: + + user_locale = tornado.locale.get("es_LA") + print(user_locale.translate("Sign out")) + +`tornado.locale.get()` returns the closest matching locale, not necessarily the +specific locale you requested. You can support pluralization with +additional arguments to `~Locale.translate()`, e.g.:: + + people = [...] 
+ message = user_locale.translate( + "%(list)s is online", "%(list)s are online", len(people)) + print(message % {"list": user_locale.list(people)}) + +The first string is chosen if ``len(people) == 1``, otherwise the second +string is chosen. + +Applications should call one of `load_translations` (which uses a simple +CSV format) or `load_gettext_translations` (which uses the ``.mo`` format +supported by `gettext` and related tools). If neither method is called, +the `Locale.translate` method will simply return the original string. +""" + +from __future__ import absolute_import, division, print_function + +import codecs +import csv +import datetime +from io import BytesIO +import numbers +import os +import re + +from tornado import escape +from tornado.log import gen_log +from tornado.util import PY3 + +from tornado._locale_data import LOCALE_NAMES + +_default_locale = "en_US" +_translations = {} # type: dict +_supported_locales = frozenset([_default_locale]) +_use_gettext = False +CONTEXT_SEPARATOR = "\x04" + + +def get(*locale_codes): + """Returns the closest match for the given locale codes. + + We iterate over all given locale codes in order. If we have a tight + or a loose match for the code (e.g., "en" for "en_US"), we return + the locale. Otherwise we move to the next code in the list. + + By default we return ``en_US`` if no translations are found for any of + the specified locales. You can change the default locale with + `set_default_locale()`. + """ + return Locale.get_closest(*locale_codes) + + +def set_default_locale(code): + """Sets the default locale. + + The default locale is assumed to be the language used for all strings + in the system. The translations loaded from disk are mappings from + the default locale to the destination locale. Consequently, you don't + need to create a translation file for the default locale. + """ + global _default_locale + global _supported_locales + _default_locale = code + _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) + + +def load_translations(directory, encoding=None): + """Loads translations from CSV files in a directory. + + Translations are strings with optional Python-style named placeholders + (e.g., ``My name is %(name)s``) and their associated translations. + + The directory should have translation files of the form ``LOCALE.csv``, + e.g. ``es_GT.csv``. The CSV files should have two or three columns: string, + translation, and an optional plural indicator. Plural indicators should + be one of "plural" or "singular". A given string can have both singular + and plural forms. For example ``%(name)s liked this`` may have a + different verb conjugation depending on whether %(name)s is one + name or a list of names. There should be two rows in the CSV file for + that string, one with plural indicator "singular", and one "plural". + For strings with no verbs that would change on translation, simply + use "unknown" or the empty string (or don't include the column at all). + + The file is read using the `csv` module in the default "excel" dialect. + In this format there should not be spaces after the commas. + + If no ``encoding`` parameter is given, the encoding will be + detected automatically (among UTF-8 and UTF-16) if the file + contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM + is present. + + Example translation ``es_LA.csv``:: + + "I love you","Te amo" + "%(name)s liked this","A %(name)s les gustó esto","plural" + "%(name)s liked this","A %(name)s le gustó esto","singular" + + .. 
versionchanged:: 4.3 + Added ``encoding`` parameter. Added support for BOM-based encoding + detection, UTF-16, and UTF-8-with-BOM. + """ + global _translations + global _supported_locales + _translations = {} + for path in os.listdir(directory): + if not path.endswith(".csv"): + continue + locale, extension = path.split(".") + if not re.match("[a-z]+(_[A-Z]+)?$", locale): + gen_log.error("Unrecognized locale %r (path: %s)", locale, + os.path.join(directory, path)) + continue + full_path = os.path.join(directory, path) + if encoding is None: + # Try to autodetect encoding based on the BOM. + with open(full_path, 'rb') as f: + data = f.read(len(codecs.BOM_UTF16_LE)) + if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): + encoding = 'utf-16' + else: + # utf-8-sig is "utf-8 with optional BOM". It's discouraged + # in most cases but is common with CSV files because Excel + # cannot read utf-8 files without a BOM. + encoding = 'utf-8-sig' + if PY3: + # python 3: csv.reader requires a file open in text mode. + # Force utf8 to avoid dependence on $LANG environment variable. + f = open(full_path, "r", encoding=encoding) + else: + # python 2: csv can only handle byte strings (in ascii-compatible + # encodings), which we decode below. Transcode everything into + # utf8 before passing it to csv.reader. + f = BytesIO() + with codecs.open(full_path, "r", encoding=encoding) as infile: + f.write(escape.utf8(infile.read())) + f.seek(0) + _translations[locale] = {} + for i, row in enumerate(csv.reader(f)): + if not row or len(row) < 2: + continue + row = [escape.to_unicode(c).strip() for c in row] + english, translation = row[:2] + if len(row) > 2: + plural = row[2] or "unknown" + else: + plural = "unknown" + if plural not in ("plural", "singular", "unknown"): + gen_log.error("Unrecognized plural indicator %r in %s line %d", + plural, path, i + 1) + continue + _translations[locale].setdefault(plural, {})[english] = translation + f.close() + _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) + gen_log.debug("Supported locales: %s", sorted(_supported_locales)) + + +def load_gettext_translations(directory, domain): + """Loads translations from `gettext`'s locale tree + + Locale tree is similar to system's ``/usr/share/locale``, like:: + + {directory}/{lang}/LC_MESSAGES/{domain}.mo + + Three steps are required to have your app translated: + + 1. Generate POT translation file:: + + xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc + + 2. Merge against existing POT file:: + + msgmerge old.po mydomain.po > new.po + + 3. 
Compile:: + + msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo + """ + import gettext + global _translations + global _supported_locales + global _use_gettext + _translations = {} + for lang in os.listdir(directory): + if lang.startswith('.'): + continue # skip .svn, etc + if os.path.isfile(os.path.join(directory, lang)): + continue + try: + os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo")) + _translations[lang] = gettext.translation(domain, directory, + languages=[lang]) + except Exception as e: + gen_log.error("Cannot load translation for '%s': %s", lang, str(e)) + continue + _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) + _use_gettext = True + gen_log.debug("Supported locales: %s", sorted(_supported_locales)) + + +def get_supported_locales(): + """Returns a list of all the supported locale codes.""" + return _supported_locales + + +class Locale(object): + """Object representing a locale. + + After calling one of `load_translations` or `load_gettext_translations`, + call `get` or `get_closest` to get a Locale object. + """ + @classmethod + def get_closest(cls, *locale_codes): + """Returns the closest match for the given locale code.""" + for code in locale_codes: + if not code: + continue + code = code.replace("-", "_") + parts = code.split("_") + if len(parts) > 2: + continue + elif len(parts) == 2: + code = parts[0].lower() + "_" + parts[1].upper() + if code in _supported_locales: + return cls.get(code) + if parts[0].lower() in _supported_locales: + return cls.get(parts[0].lower()) + return cls.get(_default_locale) + + @classmethod + def get(cls, code): + """Returns the Locale for the given locale code. + + If it is not supported, we raise an exception. + """ + if not hasattr(cls, "_cache"): + cls._cache = {} + if code not in cls._cache: + assert code in _supported_locales + translations = _translations.get(code, None) + if translations is None: + locale = CSVLocale(code, {}) + elif _use_gettext: + locale = GettextLocale(code, translations) + else: + locale = CSVLocale(code, translations) + cls._cache[code] = locale + return cls._cache[code] + + def __init__(self, code, translations): + self.code = code + self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown") + self.rtl = False + for prefix in ["fa", "ar", "he"]: + if self.code.startswith(prefix): + self.rtl = True + break + self.translations = translations + + # Initialize strings for date formatting + _ = self.translate + self._months = [ + _("January"), _("February"), _("March"), _("April"), + _("May"), _("June"), _("July"), _("August"), + _("September"), _("October"), _("November"), _("December")] + self._weekdays = [ + _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"), + _("Friday"), _("Saturday"), _("Sunday")] + + def translate(self, message, plural_message=None, count=None): + """Returns the translation for the given message for this locale. + + If ``plural_message`` is given, you must also provide + ``count``. We return ``plural_message`` when ``count != 1``, + and we return the singular form for the given message when + ``count == 1``. + """ + raise NotImplementedError() + + def pgettext(self, context, message, plural_message=None, count=None): + raise NotImplementedError() + + def format_date(self, date, gmt_offset=0, relative=True, shorter=False, + full_format=False): + """Formats the given date (which should be GMT). + + By default, we return a relative time (e.g., "2 minutes ago"). 
You
+ can return an absolute date string with ``relative=False``.
+
+ You can force a full format date ("July 10, 1980") with
+ ``full_format=True``.
+
+ This method is primarily intended for dates in the past.
+ For dates in the future, we fall back to full format.
+ """
+ if isinstance(date, numbers.Real):
+ date = datetime.datetime.utcfromtimestamp(date)
+ now = datetime.datetime.utcnow()
+ if date > now:
+ if relative and (date - now).seconds < 60:
+ # Due to clock skew, things are sometimes slightly
+ # in the future. Round timestamps in the immediate
+ # future down to now in relative mode.
+ date = now
+ else:
+ # Otherwise, future dates always use the full format.
+ full_format = True
+ local_date = date - datetime.timedelta(minutes=gmt_offset)
+ local_now = now - datetime.timedelta(minutes=gmt_offset)
+ local_yesterday = local_now - datetime.timedelta(hours=24)
+ difference = now - date
+ seconds = difference.seconds
+ days = difference.days
+
+ _ = self.translate
+ format = None
+ if not full_format:
+ if relative and days == 0:
+ if seconds < 50:
+ return _("1 second ago", "%(seconds)d seconds ago",
+ seconds) % {"seconds": seconds}
+
+ if seconds < 50 * 60:
+ minutes = round(seconds / 60.0)
+ return _("1 minute ago", "%(minutes)d minutes ago",
+ minutes) % {"minutes": minutes}
+
+ hours = round(seconds / (60.0 * 60))
+ return _("1 hour ago", "%(hours)d hours ago",
+ hours) % {"hours": hours}
+
+ if days == 0:
+ format = _("%(time)s")
+ elif days == 1 and local_date.day == local_yesterday.day and \
+ relative:
+ format = _("yesterday") if shorter else \
+ _("yesterday at %(time)s")
+ elif days < 5:
+ format = _("%(weekday)s") if shorter else \
+ _("%(weekday)s at %(time)s")
+ elif days < 334: # 11mo, since confusing for same month last year
+ format = _("%(month_name)s %(day)s") if shorter else \
+ _("%(month_name)s %(day)s at %(time)s")
+
+ if format is None:
+ format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
+ _("%(month_name)s %(day)s, %(year)s at %(time)s")
+
+ tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
+ if tfhour_clock:
+ str_time = "%d:%02d" % (local_date.hour, local_date.minute)
+ elif self.code == "zh_CN":
+ str_time = "%s%d:%02d" % (
+ (u'\u4e0a\u5348', u'\u4e0b\u5348')[local_date.hour >= 12],
+ local_date.hour % 12 or 12, local_date.minute)
+ else:
+ str_time = "%d:%02d %s" % (
+ local_date.hour % 12 or 12, local_date.minute,
+ ("am", "pm")[local_date.hour >= 12])
+
+ return format % {
+ "month_name": self._months[local_date.month - 1],
+ "weekday": self._weekdays[local_date.weekday()],
+ "day": str(local_date.day),
+ "year": str(local_date.year),
+ "time": str_time
+ }
+
+ def format_day(self, date, gmt_offset=0, dow=True):
+ """Formats the given date as a day of week.
+
+ Example: "Monday, January 22". You can remove the day of week with
+ ``dow=False``.
+ """
+ local_date = date - datetime.timedelta(minutes=gmt_offset)
+ _ = self.translate
+ if dow:
+ return _("%(weekday)s, %(month_name)s %(day)s") % {
+ "month_name": self._months[local_date.month - 1],
+ "weekday": self._weekdays[local_date.weekday()],
+ "day": str(local_date.day),
+ }
+ else:
+ return _("%(month_name)s %(day)s") % {
+ "month_name": self._months[local_date.month - 1],
+ "day": str(local_date.day),
+ }
+
+ def list(self, parts):
+ """Returns a comma-separated list for the given list of parts.
+
+ The format is, e.g., "A, B and C", "A and B" or just "A" for lists
+ of size 1.
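+
+ For example (a minimal sketch), with the untranslated default
+ ``en_US`` locale, where translations pass through unchanged::
+
+ user_locale = tornado.locale.get("en_US")
+ user_locale.list(["A", "B", "C"]) # "A, B and C"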
+ """ + _ = self.translate + if len(parts) == 0: + return "" + if len(parts) == 1: + return parts[0] + comma = u' \u0648 ' if self.code.startswith("fa") else u", " + return _("%(commas)s and %(last)s") % { + "commas": comma.join(parts[:-1]), + "last": parts[len(parts) - 1], + } + + def friendly_number(self, value): + """Returns a comma-separated number for the given integer.""" + if self.code not in ("en", "en_US"): + return str(value) + value = str(value) + parts = [] + while value: + parts.append(value[-3:]) + value = value[:-3] + return ",".join(reversed(parts)) + + +class CSVLocale(Locale): + """Locale implementation using tornado's CSV translation format.""" + def translate(self, message, plural_message=None, count=None): + if plural_message is not None: + assert count is not None + if count != 1: + message = plural_message + message_dict = self.translations.get("plural", {}) + else: + message_dict = self.translations.get("singular", {}) + else: + message_dict = self.translations.get("unknown", {}) + return message_dict.get(message, message) + + def pgettext(self, context, message, plural_message=None, count=None): + if self.translations: + gen_log.warning('pgettext is not supported by CSVLocale') + return self.translate(message, plural_message, count) + + +class GettextLocale(Locale): + """Locale implementation using the `gettext` module.""" + def __init__(self, code, translations): + try: + # python 2 + self.ngettext = translations.ungettext + self.gettext = translations.ugettext + except AttributeError: + # python 3 + self.ngettext = translations.ngettext + self.gettext = translations.gettext + # self.gettext must exist before __init__ is called, since it + # calls into self.translate + super(GettextLocale, self).__init__(code, translations) + + def translate(self, message, plural_message=None, count=None): + if plural_message is not None: + assert count is not None + return self.ngettext(message, plural_message, count) + else: + return self.gettext(message) + + def pgettext(self, context, message, plural_message=None, count=None): + """Allows to set context for translation, accepts plural forms. + + Usage example:: + + pgettext("law", "right") + pgettext("good", "right") + + Plural message example:: + + pgettext("organization", "club", "clubs", len(clubs)) + pgettext("stick", "club", "clubs", len(clubs)) + + To generate POT file with context, add following options to step 1 + of `load_gettext_translations` sequence:: + + xgettext [basic options] --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3 + + .. 
versionadded:: 4.2 + """ + if plural_message is not None: + assert count is not None + msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, message), + "%s%s%s" % (context, CONTEXT_SEPARATOR, plural_message), + count) + result = self.ngettext(*msgs_with_ctxt) + if CONTEXT_SEPARATOR in result: + # Translation not found + result = self.ngettext(message, plural_message, count) + return result + else: + msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message) + result = self.gettext(msg_with_ctxt) + if CONTEXT_SEPARATOR in result: + # Translation not found + result = message + return result diff --git a/server/www/packages/packages-windows/x86/tornado/locks.py b/server/www/packages/packages-windows/x86/tornado/locks.py new file mode 100644 index 0000000..9566a45 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/locks.py @@ -0,0 +1,526 @@ +# Copyright 2015 The Tornado Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import, division, print_function + +import collections +from concurrent.futures import CancelledError + +from tornado import gen, ioloop +from tornado.concurrent import Future, future_set_result_unless_cancelled + +__all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock'] + + +class _TimeoutGarbageCollector(object): + """Base class for objects that periodically clean up timed-out waiters. + + Avoids memory leak in a common pattern like: + + while True: + yield condition.wait(short_timeout) + print('looping....') + """ + def __init__(self): + self._waiters = collections.deque() # Futures. + self._timeouts = 0 + + def _garbage_collect(self): + # Occasionally clear timed-out waiters. + self._timeouts += 1 + if self._timeouts > 100: + self._timeouts = 0 + self._waiters = collections.deque( + w for w in self._waiters if not w.done()) + + +class Condition(_TimeoutGarbageCollector): + """A condition allows one or more coroutines to wait until notified. + + Like a standard `threading.Condition`, but does not need an underlying lock + that is acquired and released. + + With a `Condition`, coroutines can wait to be notified by other coroutines: + + .. testcode:: + + from tornado import gen + from tornado.ioloop import IOLoop + from tornado.locks import Condition + + condition = Condition() + + async def waiter(): + print("I'll wait right here") + await condition.wait() + print("I'm done waiting") + + async def notifier(): + print("About to notify") + condition.notify() + print("Done notifying") + + async def runner(): + # Wait for waiter() and notifier() in parallel + await gen.multi([waiter(), notifier()]) + + IOLoop.current().run_sync(runner) + + .. testoutput:: + + I'll wait right here + About to notify + Done notifying + I'm done waiting + + `wait` takes an optional ``timeout`` argument, which is either an absolute + timestamp:: + + io_loop = IOLoop.current() + + # Wait up to 1 second for a notification. 
+ await condition.wait(timeout=io_loop.time() + 1) + + ...or a `datetime.timedelta` for a timeout relative to the current time:: + + # Wait up to 1 second. + await condition.wait(timeout=datetime.timedelta(seconds=1)) + + The method returns False if there's no notification before the deadline. + + .. versionchanged:: 5.0 + Previously, waiters could be notified synchronously from within + `notify`. Now, the notification will always be received on the + next iteration of the `.IOLoop`. + """ + + def __init__(self): + super(Condition, self).__init__() + self.io_loop = ioloop.IOLoop.current() + + def __repr__(self): + result = '<%s' % (self.__class__.__name__, ) + if self._waiters: + result += ' waiters[%s]' % len(self._waiters) + return result + '>' + + def wait(self, timeout=None): + """Wait for `.notify`. + + Returns a `.Future` that resolves ``True`` if the condition is notified, + or ``False`` after a timeout. + """ + waiter = Future() + self._waiters.append(waiter) + if timeout: + def on_timeout(): + if not waiter.done(): + future_set_result_unless_cancelled(waiter, False) + self._garbage_collect() + io_loop = ioloop.IOLoop.current() + timeout_handle = io_loop.add_timeout(timeout, on_timeout) + waiter.add_done_callback( + lambda _: io_loop.remove_timeout(timeout_handle)) + return waiter + + def notify(self, n=1): + """Wake ``n`` waiters.""" + waiters = [] # Waiters we plan to run right now. + while n and self._waiters: + waiter = self._waiters.popleft() + if not waiter.done(): # Might have timed out. + n -= 1 + waiters.append(waiter) + + for waiter in waiters: + future_set_result_unless_cancelled(waiter, True) + + def notify_all(self): + """Wake all waiters.""" + self.notify(len(self._waiters)) + + +class Event(object): + """An event blocks coroutines until its internal flag is set to True. + + Similar to `threading.Event`. + + A coroutine can wait for an event to be set. Once it is set, calls to + ``yield event.wait()`` will not block unless the event has been cleared: + + .. testcode:: + + from tornado import gen + from tornado.ioloop import IOLoop + from tornado.locks import Event + + event = Event() + + async def waiter(): + print("Waiting for event") + await event.wait() + print("Not waiting this time") + await event.wait() + print("Done") + + async def setter(): + print("About to set the event") + event.set() + + async def runner(): + await gen.multi([waiter(), setter()]) + + IOLoop.current().run_sync(runner) + + .. testoutput:: + + Waiting for event + About to set the event + Not waiting this time + Done + """ + def __init__(self): + self._value = False + self._waiters = set() + + def __repr__(self): + return '<%s %s>' % ( + self.__class__.__name__, 'set' if self.is_set() else 'clear') + + def is_set(self): + """Return ``True`` if the internal flag is true.""" + return self._value + + def set(self): + """Set the internal flag to ``True``. All waiters are awakened. + + Calling `.wait` once the flag is set will not block. + """ + if not self._value: + self._value = True + + for fut in self._waiters: + if not fut.done(): + fut.set_result(None) + + def clear(self): + """Reset the internal flag to ``False``. + + Calls to `.wait` will block until `.set` is called. + """ + self._value = False + + def wait(self, timeout=None): + """Block until the internal flag is true. + + Returns a Future, which raises `tornado.util.TimeoutError` after a + timeout. 
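+
+ A minimal sketch of a bounded wait (assumes ``datetime`` and
+ ``tornado.util`` are imported)::
+
+ try:
+ await event.wait(timeout=datetime.timedelta(seconds=1))
+ except tornado.util.TimeoutError:
+ pass # flag still clear after one second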
+ """ + fut = Future() + if self._value: + fut.set_result(None) + return fut + self._waiters.add(fut) + fut.add_done_callback(lambda fut: self._waiters.remove(fut)) + if timeout is None: + return fut + else: + timeout_fut = gen.with_timeout(timeout, fut, quiet_exceptions=(CancelledError,)) + # This is a slightly clumsy workaround for the fact that + # gen.with_timeout doesn't cancel its futures. Cancelling + # fut will remove it from the waiters list. + timeout_fut.add_done_callback(lambda tf: fut.cancel() if not fut.done() else None) + return timeout_fut + + +class _ReleasingContextManager(object): + """Releases a Lock or Semaphore at the end of a "with" statement. + + with (yield semaphore.acquire()): + pass + + # Now semaphore.release() has been called. + """ + def __init__(self, obj): + self._obj = obj + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + self._obj.release() + + +class Semaphore(_TimeoutGarbageCollector): + """A lock that can be acquired a fixed number of times before blocking. + + A Semaphore manages a counter representing the number of `.release` calls + minus the number of `.acquire` calls, plus an initial value. The `.acquire` + method blocks if necessary until it can return without making the counter + negative. + + Semaphores limit access to a shared resource. To allow access for two + workers at a time: + + .. testsetup:: semaphore + + from collections import deque + + from tornado import gen + from tornado.ioloop import IOLoop + from tornado.concurrent import Future + + # Ensure reliable doctest output: resolve Futures one at a time. + futures_q = deque([Future() for _ in range(3)]) + + async def simulator(futures): + for f in futures: + # simulate the asynchronous passage of time + await gen.sleep(0) + await gen.sleep(0) + f.set_result(None) + + IOLoop.current().add_callback(simulator, list(futures_q)) + + def use_some_resource(): + return futures_q.popleft() + + .. testcode:: semaphore + + from tornado import gen + from tornado.ioloop import IOLoop + from tornado.locks import Semaphore + + sem = Semaphore(2) + + async def worker(worker_id): + await sem.acquire() + try: + print("Worker %d is working" % worker_id) + await use_some_resource() + finally: + print("Worker %d is done" % worker_id) + sem.release() + + async def runner(): + # Join all workers. + await gen.multi([worker(i) for i in range(3)]) + + IOLoop.current().run_sync(runner) + + .. testoutput:: semaphore + + Worker 0 is working + Worker 1 is working + Worker 0 is done + Worker 2 is working + Worker 1 is done + Worker 2 is done + + Workers 0 and 1 are allowed to run concurrently, but worker 2 waits until + the semaphore has been released once, by worker 0. + + The semaphore can be used as an async context manager:: + + async def worker(worker_id): + async with sem: + print("Worker %d is working" % worker_id) + await use_some_resource() + + # Now the semaphore has been released. + print("Worker %d is done" % worker_id) + + For compatibility with older versions of Python, `.acquire` is a + context manager, so ``worker`` could also be written as:: + + @gen.coroutine + def worker(worker_id): + with (yield sem.acquire()): + print("Worker %d is working" % worker_id) + yield use_some_resource() + + # Now the semaphore has been released. + print("Worker %d is done" % worker_id) + + .. versionchanged:: 4.3 + Added ``async with`` support in Python 3.5. 
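+
+ ``acquire`` also accepts an optional ``timeout``, either an absolute
+ deadline or a `datetime.timedelta`; a minimal sketch::
+
+ try:
+ with (yield sem.acquire(timeout=datetime.timedelta(seconds=1))):
+ pass # acquired within one second
+ except gen.TimeoutError:
+ pass # still held by the other workers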
+ + """ + def __init__(self, value=1): + super(Semaphore, self).__init__() + if value < 0: + raise ValueError('semaphore initial value must be >= 0') + + self._value = value + + def __repr__(self): + res = super(Semaphore, self).__repr__() + extra = 'locked' if self._value == 0 else 'unlocked,value:{0}'.format( + self._value) + if self._waiters: + extra = '{0},waiters:{1}'.format(extra, len(self._waiters)) + return '<{0} [{1}]>'.format(res[1:-1], extra) + + def release(self): + """Increment the counter and wake one waiter.""" + self._value += 1 + while self._waiters: + waiter = self._waiters.popleft() + if not waiter.done(): + self._value -= 1 + + # If the waiter is a coroutine paused at + # + # with (yield semaphore.acquire()): + # + # then the context manager's __exit__ calls release() at the end + # of the "with" block. + waiter.set_result(_ReleasingContextManager(self)) + break + + def acquire(self, timeout=None): + """Decrement the counter. Returns a Future. + + Block if the counter is zero and wait for a `.release`. The Future + raises `.TimeoutError` after the deadline. + """ + waiter = Future() + if self._value > 0: + self._value -= 1 + waiter.set_result(_ReleasingContextManager(self)) + else: + self._waiters.append(waiter) + if timeout: + def on_timeout(): + if not waiter.done(): + waiter.set_exception(gen.TimeoutError()) + self._garbage_collect() + io_loop = ioloop.IOLoop.current() + timeout_handle = io_loop.add_timeout(timeout, on_timeout) + waiter.add_done_callback( + lambda _: io_loop.remove_timeout(timeout_handle)) + return waiter + + def __enter__(self): + raise RuntimeError( + "Use Semaphore like 'with (yield semaphore.acquire())', not like" + " 'with semaphore'") + + __exit__ = __enter__ + + @gen.coroutine + def __aenter__(self): + yield self.acquire() + + @gen.coroutine + def __aexit__(self, typ, value, tb): + self.release() + + +class BoundedSemaphore(Semaphore): + """A semaphore that prevents release() being called too many times. + + If `.release` would increment the semaphore's value past the initial + value, it raises `ValueError`. Semaphores are mostly used to guard + resources with limited capacity, so a semaphore released too many times + is a sign of a bug. + """ + def __init__(self, value=1): + super(BoundedSemaphore, self).__init__(value=value) + self._initial_value = value + + def release(self): + """Increment the counter and wake one waiter.""" + if self._value >= self._initial_value: + raise ValueError("Semaphore released too many times") + super(BoundedSemaphore, self).release() + + +class Lock(object): + """A lock for coroutines. + + A Lock begins unlocked, and `acquire` locks it immediately. While it is + locked, a coroutine that yields `acquire` waits until another coroutine + calls `release`. + + Releasing an unlocked lock raises `RuntimeError`. + + A Lock can be used as an async context manager with the ``async + with`` statement: + + >>> from tornado import locks + >>> lock = locks.Lock() + >>> + >>> async def f(): + ... async with lock: + ... # Do something holding the lock. + ... pass + ... + ... # Now the lock is released. + + For compatibility with older versions of Python, the `.acquire` + method asynchronously returns a regular context manager: + + >>> async def f2(): + ... with (yield lock.acquire()): + ... # Do something holding the lock. + ... pass + ... + ... # Now the lock is released. + + .. versionchanged:: 4.3 + Added ``async with`` support in Python 3.5. 
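+
+ ``acquire`` takes the same optional ``timeout`` argument as
+ `Semaphore.acquire`; a minimal sketch (``datetime`` import assumed)::
+
+ try:
+ await lock.acquire(timeout=datetime.timedelta(seconds=1))
+ except tornado.util.TimeoutError:
+ pass # another coroutine holds the lock
+ else:
+ try:
+ pass # do work while holding the lock
+ finally:
+ lock.release()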
+ + """ + def __init__(self): + self._block = BoundedSemaphore(value=1) + + def __repr__(self): + return "<%s _block=%s>" % ( + self.__class__.__name__, + self._block) + + def acquire(self, timeout=None): + """Attempt to lock. Returns a Future. + + Returns a Future, which raises `tornado.util.TimeoutError` after a + timeout. + """ + return self._block.acquire(timeout) + + def release(self): + """Unlock. + + The first coroutine in line waiting for `acquire` gets the lock. + + If not locked, raise a `RuntimeError`. + """ + try: + self._block.release() + except ValueError: + raise RuntimeError('release unlocked lock') + + def __enter__(self): + raise RuntimeError( + "Use Lock like 'with (yield lock)', not like 'with lock'") + + __exit__ = __enter__ + + @gen.coroutine + def __aenter__(self): + yield self.acquire() + + @gen.coroutine + def __aexit__(self, typ, value, tb): + self.release() diff --git a/server/www/packages/packages-windows/x86/tornado/log.py b/server/www/packages/packages-windows/x86/tornado/log.py new file mode 100644 index 0000000..cda905c --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/log.py @@ -0,0 +1,290 @@ +# +# Copyright 2012 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Logging support for Tornado. + +Tornado uses three logger streams: + +* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and + potentially other servers in the future) +* ``tornado.application``: Logging of errors from application code (i.e. + uncaught exceptions from callbacks) +* ``tornado.general``: General-purpose logging, including any errors + or warnings from Tornado itself. + +These streams may be configured independently using the standard library's +`logging` module. For example, you may wish to send ``tornado.access`` logs +to a separate file for analysis. +""" +from __future__ import absolute_import, division, print_function + +import logging +import logging.handlers +import sys + +from tornado.escape import _unicode +from tornado.util import unicode_type, basestring_type + +try: + import colorama +except ImportError: + colorama = None + +try: + import curses # type: ignore +except ImportError: + curses = None + +# Logger objects for internal tornado use +access_log = logging.getLogger("tornado.access") +app_log = logging.getLogger("tornado.application") +gen_log = logging.getLogger("tornado.general") + + +def _stderr_supports_color(): + try: + if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): + if curses: + curses.setupterm() + if curses.tigetnum("colors") > 0: + return True + elif colorama: + if sys.stderr is getattr(colorama.initialise, 'wrapped_stderr', + object()): + return True + except Exception: + # Very broad exception handling because it's always better to + # fall back to non-colored logs than to break at startup. + pass + return False + + +def _safe_unicode(s): + try: + return _unicode(s) + except UnicodeDecodeError: + return repr(s) + + +class LogFormatter(logging.Formatter): + """Log formatter used in Tornado. 
+ + Key features of this formatter are: + + * Color support when logging to a terminal that supports it. + * Timestamps on every log line. + * Robust against str/bytes encoding problems. + + This formatter is enabled automatically by + `tornado.options.parse_command_line` or `tornado.options.parse_config_file` + (unless ``--logging=none`` is used). + + Color support on Windows versions that do not support ANSI color codes is + enabled by use of the colorama__ library. Applications that wish to use + this must first initialize colorama with a call to ``colorama.init``. + See the colorama documentation for details. + + __ https://pypi.python.org/pypi/colorama + + .. versionchanged:: 4.5 + Added support for ``colorama``. Changed the constructor + signature to be compatible with `logging.config.dictConfig`. + """ + DEFAULT_FORMAT = \ + '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s' + DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S' + DEFAULT_COLORS = { + logging.DEBUG: 4, # Blue + logging.INFO: 2, # Green + logging.WARNING: 3, # Yellow + logging.ERROR: 1, # Red + } + + def __init__(self, fmt=DEFAULT_FORMAT, datefmt=DEFAULT_DATE_FORMAT, + style='%', color=True, colors=DEFAULT_COLORS): + r""" + :arg bool color: Enables color support. + :arg str fmt: Log message format. + It will be applied to the attributes dict of log records. The + text between ``%(color)s`` and ``%(end_color)s`` will be colored + depending on the level if color support is on. + :arg dict colors: color mappings from logging level to terminal color + code + :arg str datefmt: Datetime format. + Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. + + .. versionchanged:: 3.2 + + Added ``fmt`` and ``datefmt`` arguments. + """ + logging.Formatter.__init__(self, datefmt=datefmt) + self._fmt = fmt + + self._colors = {} + if color and _stderr_supports_color(): + if curses is not None: + # The curses module has some str/bytes confusion in + # python3. Until version 3.2.3, most methods return + # bytes, but only accept strings. In addition, we want to + # output these strings with the logging module, which + # works with unicode strings. The explicit calls to + # unicode() below are harmless in python2 but will do the + # right conversion in python 3. + fg_color = (curses.tigetstr("setaf") or + curses.tigetstr("setf") or "") + if (3, 0) < sys.version_info < (3, 2, 3): + fg_color = unicode_type(fg_color, "ascii") + + for levelno, code in colors.items(): + self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii") + self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii") + else: + # If curses is not present (currently we'll only get here for + # colorama on windows), assume hard-coded ANSI color codes. + for levelno, code in colors.items(): + self._colors[levelno] = '\033[2;3%dm' % code + self._normal = '\033[0m' + else: + self._normal = '' + + def format(self, record): + try: + message = record.getMessage() + assert isinstance(message, basestring_type) # guaranteed by logging + # Encoding notes: The logging module prefers to work with character + # strings, but only enforces that log messages are instances of + # basestring. In python 2, non-ascii bytestrings will make + # their way through the logging framework until they blow up with + # an unhelpful decoding error (with this formatter it happens + # when we attach the prefix, but there are other opportunities for + # exceptions further along in the framework). 
+ # + # If a byte string makes it this far, convert it to unicode to + # ensure it will make it out to the logs. Use repr() as a fallback + # to ensure that all byte strings can be converted successfully, + # but don't do it by default so we don't add extra quotes to ascii + # bytestrings. This is a bit of a hacky place to do this, but + # it's worth it since the encoding errors that would otherwise + # result are so useless (and tornado is fond of using utf8-encoded + # byte strings wherever possible). + record.message = _safe_unicode(message) + except Exception as e: + record.message = "Bad message (%r): %r" % (e, record.__dict__) + + record.asctime = self.formatTime(record, self.datefmt) + + if record.levelno in self._colors: + record.color = self._colors[record.levelno] + record.end_color = self._normal + else: + record.color = record.end_color = '' + + formatted = self._fmt % record.__dict__ + + if record.exc_info: + if not record.exc_text: + record.exc_text = self.formatException(record.exc_info) + if record.exc_text: + # exc_text contains multiple lines. We need to _safe_unicode + # each line separately so that non-utf8 bytes don't cause + # all the newlines to turn into '\n'. + lines = [formatted.rstrip()] + lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n')) + formatted = '\n'.join(lines) + return formatted.replace("\n", "\n ") + + +def enable_pretty_logging(options=None, logger=None): + """Turns on formatted logging output as configured. + + This is called automatically by `tornado.options.parse_command_line` + and `tornado.options.parse_config_file`. + """ + if options is None: + import tornado.options + options = tornado.options.options + if options.logging is None or options.logging.lower() == 'none': + return + if logger is None: + logger = logging.getLogger() + logger.setLevel(getattr(logging, options.logging.upper())) + if options.log_file_prefix: + rotate_mode = options.log_rotate_mode + if rotate_mode == 'size': + channel = logging.handlers.RotatingFileHandler( + filename=options.log_file_prefix, + maxBytes=options.log_file_max_size, + backupCount=options.log_file_num_backups) + elif rotate_mode == 'time': + channel = logging.handlers.TimedRotatingFileHandler( + filename=options.log_file_prefix, + when=options.log_rotate_when, + interval=options.log_rotate_interval, + backupCount=options.log_file_num_backups) + else: + error_message = 'The value of log_rotate_mode option should be ' +\ + '"size" or "time", not "%s".' % rotate_mode + raise ValueError(error_message) + channel.setFormatter(LogFormatter(color=False)) + logger.addHandler(channel) + + if (options.log_to_stderr or + (options.log_to_stderr is None and not logger.handlers)): + # Set up color if we are in a tty and curses is installed + channel = logging.StreamHandler() + channel.setFormatter(LogFormatter()) + logger.addHandler(channel) + + +def define_logging_options(options=None): + """Add logging-related flags to ``options``. + + These options are present automatically on the default options instance; + this method is only necessary if you have created your own `.OptionParser`. + + .. versionadded:: 4.2 + This function existed in prior versions but was broken and undocumented until 4.2. + """ + if options is None: + # late import to prevent cycle + import tornado.options + options = tornado.options.options + options.define("logging", default="info", + help=("Set the Python log level. 
If 'none', tornado won't touch the "
+ "logging configuration."),
+ metavar="debug|info|warning|error|none")
+ options.define("log_to_stderr", type=bool, default=None,
+ help=("Send log output to stderr (colorized if possible). "
+ "By default use stderr if --log_file_prefix is not set and "
+ "no other logging is configured."))
+ options.define("log_file_prefix", type=str, default=None, metavar="PATH",
+ help=("Path prefix for log files. "
+ "Note that if you are running multiple tornado processes, "
+ "log_file_prefix must be different for each of them (e.g. "
+ "include the port number)"))
+ options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
+ help="max size of log files before rollover")
+ options.define("log_file_num_backups", type=int, default=10,
+ help="number of log files to keep")
+
+ options.define("log_rotate_when", type=str, default='midnight',
+ help=("specify the type of TimedRotatingFileHandler interval; "
+ "other options: ('S', 'M', 'H', 'D', 'W0'-'W6')"))
+ options.define("log_rotate_interval", type=int, default=1,
+ help="The interval value for timed rotation")
+
+ options.define("log_rotate_mode", type=str, default='size',
+ help="The mode of rotating files (time or size)")
+
+ options.add_parse_callback(lambda: enable_pretty_logging(options)) diff --git a/server/www/packages/packages-windows/x86/tornado/netutil.py b/server/www/packages/packages-windows/x86/tornado/netutil.py new file mode 100644 index 0000000..e63683a --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/netutil.py @@ -0,0 +1,575 @@ +#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Miscellaneous network utility code."""
+
+from __future__ import absolute_import, division, print_function
+
+import errno
+import os
+import sys
+import socket
+import stat
+
+from tornado.concurrent import dummy_executor, run_on_executor
+from tornado import gen
+from tornado.ioloop import IOLoop
+from tornado.platform.auto import set_close_exec
+from tornado.util import PY3, Configurable, errno_from_exception
+
+try:
+ import ssl
+except ImportError:
+ # ssl is not available on Google App Engine
+ ssl = None
+
+if PY3:
+ xrange = range
+
+if ssl is not None:
+ # Note that the naming of ssl.Purpose is confusing; the purpose
+ # of a context is to authenticate the opposite side of the connection.
+ _client_ssl_defaults = ssl.create_default_context(
+ ssl.Purpose.SERVER_AUTH)
+ _server_ssl_defaults = ssl.create_default_context(
+ ssl.Purpose.CLIENT_AUTH)
+ if hasattr(ssl, 'OP_NO_COMPRESSION'):
+ # See netutil.ssl_options_to_context
+ _client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
+ _server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
+else:
+ # Google App Engine
+ _client_ssl_defaults = dict(cert_reqs=None,
+ ca_certs=None)
+ _server_ssl_defaults = {}
+
+# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode,
+# getaddrinfo attempts to import encodings.idna.
If this is done at +# module-import time, the import lock is already held by the main thread, +# leading to deadlock. Avoid it by caching the idna encoder on the main +# thread now. +u'foo'.encode('idna') + +# For undiagnosed reasons, 'latin1' codec may also need to be preloaded. +u'foo'.encode('latin1') + +# These errnos indicate that a non-blocking operation must be retried +# at a later time. On most platforms they're the same value, but on +# some they differ. +_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN) + +if hasattr(errno, "WSAEWOULDBLOCK"): + _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) # type: ignore + +# Default backlog used when calling sock.listen() +_DEFAULT_BACKLOG = 128 + + +def bind_sockets(port, address=None, family=socket.AF_UNSPEC, + backlog=_DEFAULT_BACKLOG, flags=None, reuse_port=False): + """Creates listening sockets bound to the given port and address. + + Returns a list of socket objects (multiple sockets are returned if + the given address maps to multiple IP addresses, which is most common + for mixed IPv4 and IPv6 use). + + Address may be either an IP address or hostname. If it's a hostname, + the server will listen on all IP addresses associated with the + name. Address may be an empty string or None to listen on all + available interfaces. Family may be set to either `socket.AF_INET` + or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise + both will be used if available. + + The ``backlog`` argument has the same meaning as for + `socket.listen() `. + + ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like + ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``. + + ``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket + in the list. If your platform doesn't support this option ValueError will + be raised. + """ + if reuse_port and not hasattr(socket, "SO_REUSEPORT"): + raise ValueError("the platform doesn't support SO_REUSEPORT") + + sockets = [] + if address == "": + address = None + if not socket.has_ipv6 and family == socket.AF_UNSPEC: + # Python can be compiled with --disable-ipv6, which causes + # operations on AF_INET6 sockets to fail, but does not + # automatically exclude those results from getaddrinfo + # results. + # http://bugs.python.org/issue16208 + family = socket.AF_INET + if flags is None: + flags = socket.AI_PASSIVE + bound_port = None + for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, + 0, flags)): + af, socktype, proto, canonname, sockaddr = res + if (sys.platform == 'darwin' and address == 'localhost' and + af == socket.AF_INET6 and sockaddr[3] != 0): + # Mac OS X includes a link-local address fe80::1%lo0 in the + # getaddrinfo results for 'localhost'. However, the firewall + # doesn't understand that this is a local address and will + # prompt for access (often repeatedly, due to an apparent + # bug in its ability to remember granting access to an + # application). Skip these addresses. + continue + try: + sock = socket.socket(af, socktype, proto) + except socket.error as e: + if errno_from_exception(e) == errno.EAFNOSUPPORT: + continue + raise + set_close_exec(sock.fileno()) + if os.name != 'nt': + try: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + except socket.error as e: + if errno_from_exception(e) != errno.ENOPROTOOPT: + # Hurd doesn't support SO_REUSEADDR. 
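+ # Any other errno is a genuine failure and is re-raised below.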
+ raise + if reuse_port: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + if af == socket.AF_INET6: + # On linux, ipv6 sockets accept ipv4 too by default, + # but this makes it impossible to bind to both + # 0.0.0.0 in ipv4 and :: in ipv6. On other systems, + # separate sockets *must* be used to listen for both ipv4 + # and ipv6. For consistency, always disable ipv4 on our + # ipv6 sockets and use a separate ipv4 socket when needed. + # + # Python 2.x on windows doesn't have IPPROTO_IPV6. + if hasattr(socket, "IPPROTO_IPV6"): + sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) + + # automatic port allocation with port=None + # should bind on the same port on IPv4 and IPv6 + host, requested_port = sockaddr[:2] + if requested_port == 0 and bound_port is not None: + sockaddr = tuple([host, bound_port] + list(sockaddr[2:])) + + sock.setblocking(0) + sock.bind(sockaddr) + bound_port = sock.getsockname()[1] + sock.listen(backlog) + sockets.append(sock) + return sockets + + +if hasattr(socket, 'AF_UNIX'): + def bind_unix_socket(file, mode=0o600, backlog=_DEFAULT_BACKLOG): + """Creates a listening unix socket. + + If a socket with the given name already exists, it will be deleted. + If any other file with that name exists, an exception will be + raised. + + Returns a socket object (not a list of socket objects like + `bind_sockets`) + """ + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + set_close_exec(sock.fileno()) + try: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + except socket.error as e: + if errno_from_exception(e) != errno.ENOPROTOOPT: + # Hurd doesn't support SO_REUSEADDR + raise + sock.setblocking(0) + try: + st = os.stat(file) + except OSError as err: + if errno_from_exception(err) != errno.ENOENT: + raise + else: + if stat.S_ISSOCK(st.st_mode): + os.remove(file) + else: + raise ValueError("File %s exists and is not a socket", file) + sock.bind(file) + os.chmod(file, mode) + sock.listen(backlog) + return sock + + +def add_accept_handler(sock, callback): + """Adds an `.IOLoop` event handler to accept new connections on ``sock``. + + When a connection is accepted, ``callback(connection, address)`` will + be run (``connection`` is a socket object, and ``address`` is the + address of the other end of the connection). Note that this signature + is different from the ``callback(fd, events)`` signature used for + `.IOLoop` handlers. + + A callable is returned which, when called, will remove the `.IOLoop` + event handler and stop processing further incoming connections. + + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + + .. versionchanged:: 5.0 + A callable is returned (``None`` was returned before). + """ + io_loop = IOLoop.current() + removed = [False] + + def accept_handler(fd, events): + # More connections may come in while we're handling callbacks; + # to prevent starvation of other tasks we must limit the number + # of connections we accept at a time. Ideally we would accept + # up to the number of connections that were waiting when we + # entered this method, but this information is not available + # (and rearranging this method to call accept() as many times + # as possible before running any callbacks would have adverse + # effects on load balancing in multiprocess configurations). + # Instead, we use the (default) listen backlog as a rough + # heuristic for the number of connections we can reasonably + # accept at once. 
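+ # In concrete terms (using the module default, not a tunable of
+ # this function): at most _DEFAULT_BACKLOG = 128 accept() calls
+ # run per wake-up before control returns to the IOLoop.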
+ for i in xrange(_DEFAULT_BACKLOG): + if removed[0]: + # The socket was probably closed + return + try: + connection, address = sock.accept() + except socket.error as e: + # _ERRNO_WOULDBLOCK indicate we have accepted every + # connection that is available. + if errno_from_exception(e) in _ERRNO_WOULDBLOCK: + return + # ECONNABORTED indicates that there was a connection + # but it was closed while still in the accept queue. + # (observed on FreeBSD). + if errno_from_exception(e) == errno.ECONNABORTED: + continue + raise + set_close_exec(connection.fileno()) + callback(connection, address) + + def remove_handler(): + io_loop.remove_handler(sock) + removed[0] = True + + io_loop.add_handler(sock, accept_handler, IOLoop.READ) + return remove_handler + + +def is_valid_ip(ip): + """Returns true if the given string is a well-formed IP address. + + Supports IPv4 and IPv6. + """ + if not ip or '\x00' in ip: + # getaddrinfo resolves empty strings to localhost, and truncates + # on zero bytes. + return False + try: + res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC, + socket.SOCK_STREAM, + 0, socket.AI_NUMERICHOST) + return bool(res) + except socket.gaierror as e: + if e.args[0] == socket.EAI_NONAME: + return False + raise + return True + + +class Resolver(Configurable): + """Configurable asynchronous DNS resolver interface. + + By default, a blocking implementation is used (which simply calls + `socket.getaddrinfo`). An alternative implementation can be + chosen with the `Resolver.configure <.Configurable.configure>` + class method:: + + Resolver.configure('tornado.netutil.ThreadedResolver') + + The implementations of this interface included with Tornado are + + * `tornado.netutil.DefaultExecutorResolver` + * `tornado.netutil.BlockingResolver` (deprecated) + * `tornado.netutil.ThreadedResolver` (deprecated) + * `tornado.netutil.OverrideResolver` + * `tornado.platform.twisted.TwistedResolver` + * `tornado.platform.caresresolver.CaresResolver` + + .. versionchanged:: 5.0 + The default implementation has changed from `BlockingResolver` to + `DefaultExecutorResolver`. + """ + @classmethod + def configurable_base(cls): + return Resolver + + @classmethod + def configurable_default(cls): + return DefaultExecutorResolver + + def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None): + """Resolves an address. + + The ``host`` argument is a string which may be a hostname or a + literal IP address. + + Returns a `.Future` whose result is a list of (family, + address) pairs, where address is a tuple suitable to pass to + `socket.connect ` (i.e. a ``(host, + port)`` pair for IPv4; additional fields may be present for + IPv6). If a ``callback`` is passed, it will be run with the + result as an argument when it is complete. + + :raises IOError: if the address cannot be resolved. + + .. versionchanged:: 4.4 + Standardized all implementations to raise `IOError`. + + .. deprecated:: 5.1 + The ``callback`` argument is deprecated and will be removed in 6.0. + Use the returned awaitable object instead. + """ + raise NotImplementedError() + + def close(self): + """Closes the `Resolver`, freeing any resources used. + + .. versionadded:: 3.1 + + """ + pass + + +def _resolve_addr(host, port, family=socket.AF_UNSPEC): + # On Solaris, getaddrinfo fails if the given port is not found + # in /etc/services and no socket type is given, so we must pass + # one here. 
The socket type used here doesn't seem to actually + # matter (we discard the one we get back in the results), + # so the addresses we return should still be usable with SOCK_DGRAM. + addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM) + results = [] + for family, socktype, proto, canonname, address in addrinfo: + results.append((family, address)) + return results + + +class DefaultExecutorResolver(Resolver): + """Resolver implementation using `.IOLoop.run_in_executor`. + + .. versionadded:: 5.0 + """ + @gen.coroutine + def resolve(self, host, port, family=socket.AF_UNSPEC): + result = yield IOLoop.current().run_in_executor( + None, _resolve_addr, host, port, family) + raise gen.Return(result) + + +class ExecutorResolver(Resolver): + """Resolver implementation using a `concurrent.futures.Executor`. + + Use this instead of `ThreadedResolver` when you require additional + control over the executor being used. + + The executor will be shut down when the resolver is closed unless + ``close_resolver=False``; use this if you want to reuse the same + executor elsewhere. + + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + + .. deprecated:: 5.0 + The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead + of this class. + """ + def initialize(self, executor=None, close_executor=True): + self.io_loop = IOLoop.current() + if executor is not None: + self.executor = executor + self.close_executor = close_executor + else: + self.executor = dummy_executor + self.close_executor = False + + def close(self): + if self.close_executor: + self.executor.shutdown() + self.executor = None + + @run_on_executor + def resolve(self, host, port, family=socket.AF_UNSPEC): + return _resolve_addr(host, port, family) + + +class BlockingResolver(ExecutorResolver): + """Default `Resolver` implementation, using `socket.getaddrinfo`. + + The `.IOLoop` will be blocked during the resolution, although the + callback will not be run until the next `.IOLoop` iteration. + + .. deprecated:: 5.0 + The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead + of this class. + """ + def initialize(self): + super(BlockingResolver, self).initialize() + + +class ThreadedResolver(ExecutorResolver): + """Multithreaded non-blocking `Resolver` implementation. + + Requires the `concurrent.futures` package to be installed + (available in the standard library since Python 3.2, + installable with ``pip install futures`` in older versions). + + The thread pool size can be configured with:: + + Resolver.configure('tornado.netutil.ThreadedResolver', + num_threads=10) + + .. versionchanged:: 3.1 + All ``ThreadedResolvers`` share a single thread pool, whose + size is set by the first one to be created. + + .. deprecated:: 5.0 + The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead + of this class. + """ + _threadpool = None # type: ignore + _threadpool_pid = None # type: int + + def initialize(self, num_threads=10): + threadpool = ThreadedResolver._create_threadpool(num_threads) + super(ThreadedResolver, self).initialize( + executor=threadpool, close_executor=False) + + @classmethod + def _create_threadpool(cls, num_threads): + pid = os.getpid() + if cls._threadpool_pid != pid: + # Threads cannot survive after a fork, so if our pid isn't what it + # was when we created the pool then delete it. 
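+ # A forked child still sees the parent's pid in _threadpool_pid,
+ # so it falls through here and builds a fresh pool instead of
+ # reusing worker threads that did not survive the fork.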
+ cls._threadpool = None + if cls._threadpool is None: + from concurrent.futures import ThreadPoolExecutor + cls._threadpool = ThreadPoolExecutor(num_threads) + cls._threadpool_pid = pid + return cls._threadpool + + +class OverrideResolver(Resolver): + """Wraps a resolver with a mapping of overrides. + + This can be used to make local DNS changes (e.g. for testing) + without modifying system-wide settings. + + The mapping can be in three formats:: + + { + # Hostname to host or ip + "example.com": "127.0.1.1", + + # Host+port to host+port + ("login.example.com", 443): ("localhost", 1443), + + # Host+port+address family to host+port + ("login.example.com", 443, socket.AF_INET6): ("::1", 1443), + } + + .. versionchanged:: 5.0 + Added support for host-port-family triplets. + """ + def initialize(self, resolver, mapping): + self.resolver = resolver + self.mapping = mapping + + def close(self): + self.resolver.close() + + def resolve(self, host, port, family=socket.AF_UNSPEC, *args, **kwargs): + if (host, port, family) in self.mapping: + host, port = self.mapping[(host, port, family)] + elif (host, port) in self.mapping: + host, port = self.mapping[(host, port)] + elif host in self.mapping: + host = self.mapping[host] + return self.resolver.resolve(host, port, family, *args, **kwargs) + + +# These are the keyword arguments to ssl.wrap_socket that must be translated +# to their SSLContext equivalents (the other arguments are still passed +# to SSLContext.wrap_socket). +_SSL_CONTEXT_KEYWORDS = frozenset(['ssl_version', 'certfile', 'keyfile', + 'cert_reqs', 'ca_certs', 'ciphers']) + + +def ssl_options_to_context(ssl_options): + """Try to convert an ``ssl_options`` dictionary to an + `~ssl.SSLContext` object. + + The ``ssl_options`` dictionary contains keywords to be passed to + `ssl.wrap_socket`. In Python 2.7.9+, `ssl.SSLContext` objects can + be used instead. This function converts the dict form to its + `~ssl.SSLContext` equivalent, and may be used when a component which + accepts both forms needs to upgrade to the `~ssl.SSLContext` version + to use features like SNI or NPN. + """ + if isinstance(ssl_options, ssl.SSLContext): + return ssl_options + assert isinstance(ssl_options, dict) + assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options + # Can't use create_default_context since this interface doesn't + # tell us client vs server. + context = ssl.SSLContext( + ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23)) + if 'certfile' in ssl_options: + context.load_cert_chain(ssl_options['certfile'], ssl_options.get('keyfile', None)) + if 'cert_reqs' in ssl_options: + context.verify_mode = ssl_options['cert_reqs'] + if 'ca_certs' in ssl_options: + context.load_verify_locations(ssl_options['ca_certs']) + if 'ciphers' in ssl_options: + context.set_ciphers(ssl_options['ciphers']) + if hasattr(ssl, 'OP_NO_COMPRESSION'): + # Disable TLS compression to avoid CRIME and related attacks. + # This constant depends on openssl version 1.0. + # TODO: Do we need to do this ourselves or can we trust + # the defaults? + context.options |= ssl.OP_NO_COMPRESSION + return context + + +def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs): + """Returns an ``ssl.SSLSocket`` wrapping the given socket. + + ``ssl_options`` may be either an `ssl.SSLContext` object or a + dictionary (as accepted by `ssl_options_to_context`). Additional + keyword arguments are passed to ``wrap_socket`` (either the + `~ssl.SSLContext` method or the `ssl` module function as + appropriate). 
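+
+ A minimal client-side sketch (the hostname and the ready-made
+ context are illustrative assumptions, not part of this API)::
+
+ # 'sock' is an already-connected TCP socket
+ ctx = ssl.create_default_context()
+ ssl_sock = ssl_wrap_socket(sock, ctx, server_hostname='example.com')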
+
+ """
+ context = ssl_options_to_context(ssl_options)
+ if ssl.HAS_SNI:
+ # In python 3.4, wrap_socket only accepts the server_hostname
+ # argument if HAS_SNI is true.
+ # TODO: add a unittest (python added server-side SNI support in 3.4)
+ # In the meantime it can be manually tested with
+ # python3 -m tornado.httpclient https://sni.velox.ch
+ return context.wrap_socket(socket, server_hostname=server_hostname,
+ **kwargs)
+ else:
+ return context.wrap_socket(socket, **kwargs) diff --git a/server/www/packages/packages-windows/x86/tornado/options.py b/server/www/packages/packages-windows/x86/tornado/options.py new file mode 100644 index 0000000..0a4b965 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/options.py @@ -0,0 +1,654 @@ +#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A command line parsing module that lets modules define their own options.
+
+This module is inspired by Google's `gflags
+<https://github.com/google/python-gflags>`_. The primary difference
+with libraries such as `argparse` is that a global registry is used so
+that options may be defined in any module (it also enables
+`tornado.log` by default). The rest of Tornado does not depend on this
+module, so feel free to use `argparse` or other configuration
+libraries if you prefer them.
+
+Options must be defined with `tornado.options.define` before use,
+generally at the top level of a module. The options are then
+accessible as attributes of `tornado.options.options`::
+
+ # myapp/db.py
+ from tornado.options import define, options
+
+ define("mysql_host", default="127.0.0.1:3306", help="Main user DB")
+ define("memcache_hosts", default="127.0.0.1:11011", multiple=True,
+ help="Main user memcache servers")
+
+ def connect():
+ db = database.Connection(options.mysql_host)
+ ...
+
+ # myapp/server.py
+ from tornado.options import define, options
+
+ define("port", default=8080, help="port to listen on")
+
+ def start_server():
+ app = make_app()
+ app.listen(options.port)
+
+The ``main()`` method of your application does not need to be aware of all of
+the options used throughout your program; they are all automatically loaded
+when the modules are loaded. However, all modules that define options
+must have been imported before the command line is parsed.
+
+Your ``main()`` method can parse the command line or parse a config file with
+either `parse_command_line` or `parse_config_file`::
+
+ import myapp.db, myapp.server
+ import tornado.options
+
+ if __name__ == '__main__':
+ tornado.options.parse_command_line()
+ # or
+ tornado.options.parse_config_file("/etc/server.conf")
+
+.. note::
+
+ When using multiple ``parse_*`` functions, pass ``final=False`` to all
+ but the last one, or side effects may occur twice (in particular,
+ this can result in log messages being doubled).
+
+`tornado.options.options` is a singleton instance of `OptionParser`, and
+the top-level functions in this module (`define`, `parse_command_line`, etc)
+simply call methods on it.
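+For example, ``define("port", default=8080)`` in any imported module
+makes the parsed value available everywhere as ``options.port``.
+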
You may create additional `OptionParser` +instances to define isolated sets of options, such as for subcommands. + +.. note:: + + By default, several options are defined that will configure the + standard `logging` module when `parse_command_line` or `parse_config_file` + are called. If you want Tornado to leave the logging configuration + alone so you can manage it yourself, either pass ``--logging=none`` + on the command line or do the following to disable it in code:: + + from tornado.options import options, parse_command_line + options.logging = None + parse_command_line() + +.. versionchanged:: 4.3 + Dashes and underscores are fully interchangeable in option names; + options can be defined, set, and read with any mix of the two. + Dashes are typical for command-line usage while config files require + underscores. +""" + +from __future__ import absolute_import, division, print_function + +import datetime +import numbers +import re +import sys +import os +import textwrap + +from tornado.escape import _unicode, native_str +from tornado.log import define_logging_options +from tornado import stack_context +from tornado.util import basestring_type, exec_in + + +class Error(Exception): + """Exception raised by errors in the options module.""" + pass + + +class OptionParser(object): + """A collection of options, a dictionary with object-like access. + + Normally accessed via static functions in the `tornado.options` module, + which reference a global instance. + """ + def __init__(self): + # we have to use self.__dict__ because we override setattr. + self.__dict__['_options'] = {} + self.__dict__['_parse_callbacks'] = [] + self.define("help", type=bool, help="show this help information", + callback=self._help_callback) + + def _normalize_name(self, name): + return name.replace('_', '-') + + def __getattr__(self, name): + name = self._normalize_name(name) + if isinstance(self._options.get(name), _Option): + return self._options[name].value() + raise AttributeError("Unrecognized option %r" % name) + + def __setattr__(self, name, value): + name = self._normalize_name(name) + if isinstance(self._options.get(name), _Option): + return self._options[name].set(value) + raise AttributeError("Unrecognized option %r" % name) + + def __iter__(self): + return (opt.name for opt in self._options.values()) + + def __contains__(self, name): + name = self._normalize_name(name) + return name in self._options + + def __getitem__(self, name): + return self.__getattr__(name) + + def __setitem__(self, name, value): + return self.__setattr__(name, value) + + def items(self): + """A sequence of (name, value) pairs. + + .. versionadded:: 3.1 + """ + return [(opt.name, opt.value()) for name, opt in self._options.items()] + + def groups(self): + """The set of option-groups created by ``define``. + + .. versionadded:: 3.1 + """ + return set(opt.group_name for opt in self._options.values()) + + def group_dict(self, group): + """The names and values of options in a group. + + Useful for copying options into Application settings:: + + from tornado.options import define, parse_command_line, options + + define('template_path', group='application') + define('static_path', group='application') + + parse_command_line() + + application = Application( + handlers, **options.group_dict('application')) + + .. versionadded:: 3.1 + """ + return dict( + (opt.name, opt.value()) for name, opt in self._options.items() + if not group or group == opt.group_name) + + def as_dict(self): + """The names and values of all options. + + .. 
versionadded:: 3.1 + """ + return dict( + (opt.name, opt.value()) for name, opt in self._options.items()) + + def define(self, name, default=None, type=None, help=None, metavar=None, + multiple=False, group=None, callback=None): + """Defines a new command line option. + + ``type`` can be any of `str`, `int`, `float`, `bool`, + `~datetime.datetime`, or `~datetime.timedelta`. If no ``type`` + is given but a ``default`` is, ``type`` is the type of + ``default``. Otherwise, ``type`` defaults to `str`. + + If ``multiple`` is True, the option value is a list of ``type`` + instead of an instance of ``type``. + + ``help`` and ``metavar`` are used to construct the + automatically generated command line help string. The help + message is formatted like:: + + --name=METAVAR help string + + ``group`` is used to group the defined options in logical + groups. By default, command line options are grouped by the + file in which they are defined. + + Command line option names must be unique globally. + + If a ``callback`` is given, it will be run with the new value whenever + the option is changed. This can be used to combine command-line + and file-based options:: + + define("config", type=str, help="path to config file", + callback=lambda path: parse_config_file(path, final=False)) + + With this definition, options in the file specified by ``--config`` will + override options set earlier on the command line, but can be overridden + by later flags. + + """ + normalized = self._normalize_name(name) + if normalized in self._options: + raise Error("Option %r already defined in %s" % + (normalized, self._options[normalized].file_name)) + frame = sys._getframe(0) + options_file = frame.f_code.co_filename + + # Can be called directly, or through top level define() fn, in which + # case, step up above that frame to look for real caller. + if (frame.f_back.f_code.co_filename == options_file and + frame.f_back.f_code.co_name == 'define'): + frame = frame.f_back + + file_name = frame.f_back.f_code.co_filename + if file_name == options_file: + file_name = "" + if type is None: + if not multiple and default is not None: + type = default.__class__ + else: + type = str + if group: + group_name = group + else: + group_name = file_name + option = _Option(name, file_name=file_name, + default=default, type=type, help=help, + metavar=metavar, multiple=multiple, + group_name=group_name, + callback=callback) + self._options[normalized] = option + + def parse_command_line(self, args=None, final=True): + """Parses all options given on the command line (defaults to + `sys.argv`). + + Options look like ``--option=value`` and are parsed according + to their ``type``. For boolean options, ``--option`` is + equivalent to ``--option=true`` + + If the option has ``multiple=True``, comma-separated values + are accepted. For multi-value integer options, the syntax + ``x:y`` is also accepted and equivalent to ``range(x, y)``. + + Note that ``args[0]`` is ignored since it is the program name + in `sys.argv`. + + We return a list of all arguments that are not parsed as options. + + If ``final`` is ``False``, parse callbacks will not be run. + This is useful for applications that wish to combine configurations + from multiple sources. 
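+
+ A sketch of the expected behavior (the flag and file name are
+ illustrative)::
+
+ # argv: prog --port=8080 data.txt
+ rest = options.parse_command_line()
+ # rest == ['data.txt'] and options.port == 8080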
+ + """ + if args is None: + args = sys.argv + remaining = [] + for i in range(1, len(args)): + # All things after the last option are command line arguments + if not args[i].startswith("-"): + remaining = args[i:] + break + if args[i] == "--": + remaining = args[i + 1:] + break + arg = args[i].lstrip("-") + name, equals, value = arg.partition("=") + name = self._normalize_name(name) + if name not in self._options: + self.print_help() + raise Error('Unrecognized command line option: %r' % name) + option = self._options[name] + if not equals: + if option.type == bool: + value = "true" + else: + raise Error('Option %r requires a value' % name) + option.parse(value) + + if final: + self.run_parse_callbacks() + + return remaining + + def parse_config_file(self, path, final=True): + """Parses and loads the config file at the given path. + + The config file contains Python code that will be executed (so + it is **not safe** to use untrusted config files). Anything in + the global namespace that matches a defined option will be + used to set that option's value. + + Options may either be the specified type for the option or + strings (in which case they will be parsed the same way as in + `.parse_command_line`) + + Example (using the options defined in the top-level docs of + this module):: + + port = 80 + mysql_host = 'mydb.example.com:3306' + # Both lists and comma-separated strings are allowed for + # multiple=True. + memcache_hosts = ['cache1.example.com:11011', + 'cache2.example.com:11011'] + memcache_hosts = 'cache1.example.com:11011,cache2.example.com:11011' + + If ``final`` is ``False``, parse callbacks will not be run. + This is useful for applications that wish to combine configurations + from multiple sources. + + .. note:: + + `tornado.options` is primarily a command-line library. + Config file support is provided for applications that wish + to use it, but applications that prefer config files may + wish to look at other libraries instead. + + .. versionchanged:: 4.1 + Config files are now always interpreted as utf-8 instead of + the system default encoding. + + .. versionchanged:: 4.4 + The special variable ``__file__`` is available inside config + files, specifying the absolute path to the config file itself. + + .. versionchanged:: 5.1 + Added the ability to set options via strings in config files. 
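+
+ A common arrangement (file path illustrative) is to parse the
+ file first with deferred callbacks so command-line flags win::
+
+ options.parse_config_file("/etc/app.conf", final=False)
+ options.parse_command_line()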
+
+ """
+ config = {'__file__': os.path.abspath(path)}
+ with open(path, 'rb') as f:
+ exec_in(native_str(f.read()), config, config)
+ for name in config:
+ normalized = self._normalize_name(name)
+ if normalized in self._options:
+ option = self._options[normalized]
+ if option.multiple:
+ if not isinstance(config[name], (list, str)):
+ raise Error("Option %r is required to be a list of %s "
+ "or a comma-separated string" %
+ (option.name, option.type.__name__))
+
+ if type(config[name]) == str and option.type != str:
+ option.parse(config[name])
+ else:
+ option.set(config[name])
+
+ if final:
+ self.run_parse_callbacks()
+
+ def print_help(self, file=None):
+ """Prints all the command line options to stderr (or another file)."""
+ if file is None:
+ file = sys.stderr
+ print("Usage: %s [OPTIONS]" % sys.argv[0], file=file)
+ print("\nOptions:\n", file=file)
+ by_group = {}
+ for option in self._options.values():
+ by_group.setdefault(option.group_name, []).append(option)
+
+ for filename, o in sorted(by_group.items()):
+ if filename:
+ print("\n%s options:\n" % os.path.normpath(filename), file=file)
+ o.sort(key=lambda option: option.name)
+ for option in o:
+ # Always print names with dashes in a CLI context.
+ prefix = self._normalize_name(option.name)
+ if option.metavar:
+ prefix += "=" + option.metavar
+ description = option.help or ""
+ if option.default is not None and option.default != '':
+ description += " (default %s)" % option.default
+ lines = textwrap.wrap(description, 79 - 35)
+ if len(prefix) > 30 or len(lines) == 0:
+ lines.insert(0, '')
+ print(" --%-30s %s" % (prefix, lines[0]), file=file)
+ for line in lines[1:]:
+ print("%-34s %s" % (' ', line), file=file)
+ print(file=file)
+
+ def _help_callback(self, value):
+ if value:
+ self.print_help()
+ sys.exit(0)
+
+ def add_parse_callback(self, callback):
+ """Adds a parse callback, to be invoked when option parsing is done."""
+ self._parse_callbacks.append(stack_context.wrap(callback))
+
+ def run_parse_callbacks(self):
+ for callback in self._parse_callbacks:
+ callback()
+
+ def mockable(self):
+ """Returns a wrapper around self that is compatible with
+ `mock.patch <unittest.mock.patch>`.
+
+ The `mock.patch <unittest.mock.patch>` function (included in
+ the standard library `unittest.mock` package since Python 3.3,
+ or in the third-party ``mock`` package for older versions of
+ Python) is incompatible with objects like ``options`` that
+ override ``__getattr__`` and ``__setattr__``. This function
+ returns an object that can be used with `mock.patch.object
+ <unittest.mock.patch.object>` to modify option values::
+
+ with mock.patch.object(options.mockable(), 'name', value):
+ assert options.name == value
+ """
+ return _Mockable(self)
+
+
+class _Mockable(object):
+ """`mock.patch` compatible wrapper for `OptionParser`.
+
+ As of ``mock`` version 1.0.1, when an object uses ``__getattr__``
+ hooks instead of ``__dict__``, ``patch.__exit__`` tries to delete
+ the attribute it set instead of setting a new one (assuming that
+ the object does not capture ``__setattr__``, so the patch
+ created a new attribute in ``__dict__``).
+
+ _Mockable's getattr and setattr pass through to the underlying
+ OptionParser, and delattr undoes the effect of a previous setattr.
+ """ + def __init__(self, options): + # Modify __dict__ directly to bypass __setattr__ + self.__dict__['_options'] = options + self.__dict__['_originals'] = {} + + def __getattr__(self, name): + return getattr(self._options, name) + + def __setattr__(self, name, value): + assert name not in self._originals, "don't reuse mockable objects" + self._originals[name] = getattr(self._options, name) + setattr(self._options, name, value) + + def __delattr__(self, name): + setattr(self._options, name, self._originals.pop(name)) + + +class _Option(object): + UNSET = object() + + def __init__(self, name, default=None, type=basestring_type, help=None, + metavar=None, multiple=False, file_name=None, group_name=None, + callback=None): + if default is None and multiple: + default = [] + self.name = name + self.type = type + self.help = help + self.metavar = metavar + self.multiple = multiple + self.file_name = file_name + self.group_name = group_name + self.callback = callback + self.default = default + self._value = _Option.UNSET + + def value(self): + return self.default if self._value is _Option.UNSET else self._value + + def parse(self, value): + _parse = { + datetime.datetime: self._parse_datetime, + datetime.timedelta: self._parse_timedelta, + bool: self._parse_bool, + basestring_type: self._parse_string, + }.get(self.type, self.type) + if self.multiple: + self._value = [] + for part in value.split(","): + if issubclass(self.type, numbers.Integral): + # allow ranges of the form X:Y (inclusive at both ends) + lo, _, hi = part.partition(":") + lo = _parse(lo) + hi = _parse(hi) if hi else lo + self._value.extend(range(lo, hi + 1)) + else: + self._value.append(_parse(part)) + else: + self._value = _parse(value) + if self.callback is not None: + self.callback(self._value) + return self.value() + + def set(self, value): + if self.multiple: + if not isinstance(value, list): + raise Error("Option %r is required to be a list of %s" % + (self.name, self.type.__name__)) + for item in value: + if item is not None and not isinstance(item, self.type): + raise Error("Option %r is required to be a list of %s" % + (self.name, self.type.__name__)) + else: + if value is not None and not isinstance(value, self.type): + raise Error("Option %r is required to be a %s (%s given)" % + (self.name, self.type.__name__, type(value))) + self._value = value + if self.callback is not None: + self.callback(self._value) + + # Supported date/time formats in our options + _DATETIME_FORMATS = [ + "%a %b %d %H:%M:%S %Y", + "%Y-%m-%d %H:%M:%S", + "%Y-%m-%d %H:%M", + "%Y-%m-%dT%H:%M", + "%Y%m%d %H:%M:%S", + "%Y%m%d %H:%M", + "%Y-%m-%d", + "%Y%m%d", + "%H:%M:%S", + "%H:%M", + ] + + def _parse_datetime(self, value): + for format in self._DATETIME_FORMATS: + try: + return datetime.datetime.strptime(value, format) + except ValueError: + pass + raise Error('Unrecognized date/time format: %r' % value) + + _TIMEDELTA_ABBREV_DICT = { + 'h': 'hours', + 'm': 'minutes', + 'min': 'minutes', + 's': 'seconds', + 'sec': 'seconds', + 'ms': 'milliseconds', + 'us': 'microseconds', + 'd': 'days', + 'w': 'weeks', + } + + _FLOAT_PATTERN = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?' 
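+
+ # _FLOAT_PATTERN accepts an optional sign, digits with an optional
+ # fraction (or a bare fraction such as ".5"), and an optional
+ # exponent, e.g. "1.5", "-2", "3e-4". _TIMEDELTA_PATTERN below
+ # pairs each number with an optional unit word, so "1h 30min"
+ # yields two (number, unit) matches.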
+ + _TIMEDELTA_PATTERN = re.compile( + r'\s*(%s)\s*(\w*)\s*' % _FLOAT_PATTERN, re.IGNORECASE) + + def _parse_timedelta(self, value): + try: + sum = datetime.timedelta() + start = 0 + while start < len(value): + m = self._TIMEDELTA_PATTERN.match(value, start) + if not m: + raise Exception() + num = float(m.group(1)) + units = m.group(2) or 'seconds' + units = self._TIMEDELTA_ABBREV_DICT.get(units, units) + sum += datetime.timedelta(**{units: num}) + start = m.end() + return sum + except Exception: + raise + + def _parse_bool(self, value): + return value.lower() not in ("false", "0", "f") + + def _parse_string(self, value): + return _unicode(value) + + +options = OptionParser() +"""Global options object. + +All defined options are available as attributes on this object. +""" + + +def define(name, default=None, type=None, help=None, metavar=None, + multiple=False, group=None, callback=None): + """Defines an option in the global namespace. + + See `OptionParser.define`. + """ + return options.define(name, default=default, type=type, help=help, + metavar=metavar, multiple=multiple, group=group, + callback=callback) + + +def parse_command_line(args=None, final=True): + """Parses global options from the command line. + + See `OptionParser.parse_command_line`. + """ + return options.parse_command_line(args, final=final) + + +def parse_config_file(path, final=True): + """Parses global options from a config file. + + See `OptionParser.parse_config_file`. + """ + return options.parse_config_file(path, final=final) + + +def print_help(file=None): + """Prints all the command line options to stderr (or another file). + + See `OptionParser.print_help`. + """ + return options.print_help(file) + + +def add_parse_callback(callback): + """Adds a parse callback, to be invoked when option parsing is done. + + See `OptionParser.add_parse_callback` + """ + options.add_parse_callback(callback) + + +# Default options +define_logging_options(options) diff --git a/server/www/packages/packages-windows/x86/tornado/platform/__init__.py b/server/www/packages/packages-windows/x86/tornado/platform/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/server/www/packages/packages-windows/x86/tornado/platform/asyncio.py b/server/www/packages/packages-windows/x86/tornado/platform/asyncio.py new file mode 100644 index 0000000..e0042e1 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/platform/asyncio.py @@ -0,0 +1,299 @@ +"""Bridges between the `asyncio` module and Tornado IOLoop. + +.. versionadded:: 3.2 + +This module integrates Tornado with the ``asyncio`` module introduced +in Python 3.4. This makes it possible to combine the two libraries on +the same event loop. + +.. deprecated:: 5.0 + + While the code in this module is still used, it is now enabled + automatically when `asyncio` is available, so applications should + no longer need to refer to this module directly. + +.. note:: + + Tornado requires the `~asyncio.AbstractEventLoop.add_reader` family of + methods, so it is not compatible with the `~asyncio.ProactorEventLoop` on + Windows. Use the `~asyncio.SelectorEventLoop` instead. 
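+
+ One way to opt in on Windows (an illustrative sketch; other
+ platforms already default to a selector-based loop)::
+
+ import asyncio
+ asyncio.set_event_loop(asyncio.SelectorEventLoop())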
+""" + +from __future__ import absolute_import, division, print_function +import functools + +from tornado.gen import convert_yielded +from tornado.ioloop import IOLoop +from tornado import stack_context + +import asyncio + + +class BaseAsyncIOLoop(IOLoop): + def initialize(self, asyncio_loop, **kwargs): + self.asyncio_loop = asyncio_loop + # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler) + self.handlers = {} + # Set of fds listening for reads/writes + self.readers = set() + self.writers = set() + self.closing = False + # If an asyncio loop was closed through an asyncio interface + # instead of IOLoop.close(), we'd never hear about it and may + # have left a dangling reference in our map. In case an + # application (or, more likely, a test suite) creates and + # destroys a lot of event loops in this way, check here to + # ensure that we don't have a lot of dead loops building up in + # the map. + # + # TODO(bdarnell): consider making self.asyncio_loop a weakref + # for AsyncIOMainLoop and make _ioloop_for_asyncio a + # WeakKeyDictionary. + for loop in list(IOLoop._ioloop_for_asyncio): + if loop.is_closed(): + del IOLoop._ioloop_for_asyncio[loop] + IOLoop._ioloop_for_asyncio[asyncio_loop] = self + super(BaseAsyncIOLoop, self).initialize(**kwargs) + + def close(self, all_fds=False): + self.closing = True + for fd in list(self.handlers): + fileobj, handler_func = self.handlers[fd] + self.remove_handler(fd) + if all_fds: + self.close_fd(fileobj) + # Remove the mapping before closing the asyncio loop. If this + # happened in the other order, we could race against another + # initialize() call which would see the closed asyncio loop, + # assume it was closed from the asyncio side, and do this + # cleanup for us, leading to a KeyError. 
+ del IOLoop._ioloop_for_asyncio[self.asyncio_loop] + self.asyncio_loop.close() + + def add_handler(self, fd, handler, events): + fd, fileobj = self.split_fd(fd) + if fd in self.handlers: + raise ValueError("fd %s added twice" % fd) + self.handlers[fd] = (fileobj, stack_context.wrap(handler)) + if events & IOLoop.READ: + self.asyncio_loop.add_reader( + fd, self._handle_events, fd, IOLoop.READ) + self.readers.add(fd) + if events & IOLoop.WRITE: + self.asyncio_loop.add_writer( + fd, self._handle_events, fd, IOLoop.WRITE) + self.writers.add(fd) + + def update_handler(self, fd, events): + fd, fileobj = self.split_fd(fd) + if events & IOLoop.READ: + if fd not in self.readers: + self.asyncio_loop.add_reader( + fd, self._handle_events, fd, IOLoop.READ) + self.readers.add(fd) + else: + if fd in self.readers: + self.asyncio_loop.remove_reader(fd) + self.readers.remove(fd) + if events & IOLoop.WRITE: + if fd not in self.writers: + self.asyncio_loop.add_writer( + fd, self._handle_events, fd, IOLoop.WRITE) + self.writers.add(fd) + else: + if fd in self.writers: + self.asyncio_loop.remove_writer(fd) + self.writers.remove(fd) + + def remove_handler(self, fd): + fd, fileobj = self.split_fd(fd) + if fd not in self.handlers: + return + if fd in self.readers: + self.asyncio_loop.remove_reader(fd) + self.readers.remove(fd) + if fd in self.writers: + self.asyncio_loop.remove_writer(fd) + self.writers.remove(fd) + del self.handlers[fd] + + def _handle_events(self, fd, events): + fileobj, handler_func = self.handlers[fd] + handler_func(fileobj, events) + + def start(self): + try: + old_loop = asyncio.get_event_loop() + except (RuntimeError, AssertionError): + old_loop = None + try: + self._setup_logging() + asyncio.set_event_loop(self.asyncio_loop) + self.asyncio_loop.run_forever() + finally: + asyncio.set_event_loop(old_loop) + + def stop(self): + self.asyncio_loop.stop() + + def call_at(self, when, callback, *args, **kwargs): + # asyncio.call_at supports *args but not **kwargs, so bind them here. + # We do not synchronize self.time and asyncio_loop.time, so + # convert from absolute to relative. + return self.asyncio_loop.call_later( + max(0, when - self.time()), self._run_callback, + functools.partial(stack_context.wrap(callback), *args, **kwargs)) + + def remove_timeout(self, timeout): + timeout.cancel() + + def add_callback(self, callback, *args, **kwargs): + try: + self.asyncio_loop.call_soon_threadsafe( + self._run_callback, + functools.partial(stack_context.wrap(callback), *args, **kwargs)) + except RuntimeError: + # "Event loop is closed". Swallow the exception for + # consistency with PollIOLoop (and logical consistency + # with the fact that we can't guarantee that an + # add_callback that completes without error will + # eventually execute). + pass + + add_callback_from_signal = add_callback + + def run_in_executor(self, executor, func, *args): + return self.asyncio_loop.run_in_executor(executor, func, *args) + + def set_default_executor(self, executor): + return self.asyncio_loop.set_default_executor(executor) + + +class AsyncIOMainLoop(BaseAsyncIOLoop): + """``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the + current ``asyncio`` event loop (i.e. the one returned by + ``asyncio.get_event_loop()``). + + .. deprecated:: 5.0 + + Now used automatically when appropriate; it is no longer necessary + to refer to this class directly. + + .. versionchanged:: 5.0 + + Closing an `AsyncIOMainLoop` now closes the underlying asyncio loop. 
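+
+ Under Tornado 4.x this class had to be installed explicitly
+ (shown for historical context only)::
+
+ from tornado.platform.asyncio import AsyncIOMainLoop
+ AsyncIOMainLoop().install()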
+ """ + def initialize(self, **kwargs): + super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(), **kwargs) + + def make_current(self): + # AsyncIOMainLoop already refers to the current asyncio loop so + # nothing to do here. + pass + + +class AsyncIOLoop(BaseAsyncIOLoop): + """``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop. + This class follows the usual Tornado semantics for creating new + ``IOLoops``; these loops are not necessarily related to the + ``asyncio`` default event loop. + + Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object + can be accessed with the ``asyncio_loop`` attribute. + + .. versionchanged:: 5.0 + + When an ``AsyncIOLoop`` becomes the current `.IOLoop`, it also sets + the current `asyncio` event loop. + + .. deprecated:: 5.0 + + Now used automatically when appropriate; it is no longer necessary + to refer to this class directly. + """ + def initialize(self, **kwargs): + self.is_current = False + loop = asyncio.new_event_loop() + try: + super(AsyncIOLoop, self).initialize(loop, **kwargs) + except Exception: + # If initialize() does not succeed (taking ownership of the loop), + # we have to close it. + loop.close() + raise + + def close(self, all_fds=False): + if self.is_current: + self.clear_current() + super(AsyncIOLoop, self).close(all_fds=all_fds) + + def make_current(self): + if not self.is_current: + try: + self.old_asyncio = asyncio.get_event_loop() + except (RuntimeError, AssertionError): + self.old_asyncio = None + self.is_current = True + asyncio.set_event_loop(self.asyncio_loop) + + def _clear_current_hook(self): + if self.is_current: + asyncio.set_event_loop(self.old_asyncio) + self.is_current = False + + +def to_tornado_future(asyncio_future): + """Convert an `asyncio.Future` to a `tornado.concurrent.Future`. + + .. versionadded:: 4.1 + + .. deprecated:: 5.0 + Tornado ``Futures`` have been merged with `asyncio.Future`, + so this method is now a no-op. + """ + return asyncio_future + + +def to_asyncio_future(tornado_future): + """Convert a Tornado yieldable object to an `asyncio.Future`. + + .. versionadded:: 4.1 + + .. versionchanged:: 4.3 + Now accepts any yieldable object, not just + `tornado.concurrent.Future`. + + .. deprecated:: 5.0 + Tornado ``Futures`` have been merged with `asyncio.Future`, + so this method is now equivalent to `tornado.gen.convert_yielded`. + """ + return convert_yielded(tornado_future) + + +class AnyThreadEventLoopPolicy(asyncio.DefaultEventLoopPolicy): + """Event loop policy that allows loop creation on any thread. + + The default `asyncio` event loop policy only automatically creates + event loops in the main threads. Other threads must create event + loops explicitly or `asyncio.get_event_loop` (and therefore + `.IOLoop.current`) will fail. Installing this policy allows event + loops to be created automatically on any thread, matching the + behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2). + + Usage:: + + asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) + + .. versionadded:: 5.0 + + """ + def get_event_loop(self): + try: + return super().get_event_loop() + except (RuntimeError, AssertionError): + # This was an AssertionError in python 3.4.2 (which ships with debian jessie) + # and changed to a RuntimeError in 3.4.3. 
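+ # Both versions surface the same message: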
+ # "There is no current event loop in thread %r" + loop = self.new_event_loop() + self.set_event_loop(loop) + return loop diff --git a/server/www/packages/packages-windows/x86/tornado/platform/auto.py b/server/www/packages/packages-windows/x86/tornado/platform/auto.py new file mode 100644 index 0000000..1a9133f --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/platform/auto.py @@ -0,0 +1,58 @@ +# +# Copyright 2011 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of platform-specific functionality. + +For each function or class described in `tornado.platform.interface`, +the appropriate platform-specific implementation exists in this module. +Most code that needs access to this functionality should do e.g.:: + + from tornado.platform.auto import set_close_exec +""" + +from __future__ import absolute_import, division, print_function + +import os + +if 'APPENGINE_RUNTIME' in os.environ: + from tornado.platform.common import Waker + + def set_close_exec(fd): + pass +elif os.name == 'nt': + from tornado.platform.common import Waker + from tornado.platform.windows import set_close_exec +else: + from tornado.platform.posix import set_close_exec, Waker + +try: + # monotime monkey-patches the time module to have a monotonic function + # in versions of python before 3.3. + import monotime + # Silence pyflakes warning about this unused import + monotime +except ImportError: + pass +try: + # monotonic can provide a monotonic function in versions of python before + # 3.3, too. + from monotonic import monotonic as monotonic_time +except ImportError: + try: + from time import monotonic as monotonic_time + except ImportError: + monotonic_time = None + +__all__ = ['Waker', 'set_close_exec', 'monotonic_time'] diff --git a/server/www/packages/packages-windows/x86/tornado/platform/caresresolver.py b/server/www/packages/packages-windows/x86/tornado/platform/caresresolver.py new file mode 100644 index 0000000..768cb62 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/platform/caresresolver.py @@ -0,0 +1,79 @@ +from __future__ import absolute_import, division, print_function +import pycares # type: ignore +import socket + +from tornado.concurrent import Future +from tornado import gen +from tornado.ioloop import IOLoop +from tornado.netutil import Resolver, is_valid_ip + + +class CaresResolver(Resolver): + """Name resolver based on the c-ares library. + + This is a non-blocking and non-threaded resolver. It may not produce + the same results as the system resolver, but can be used for non-blocking + resolution when threads cannot be used. + + c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``, + so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is + the default for ``tornado.simple_httpclient``, but other libraries + may default to ``AF_UNSPEC``. + + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. 
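+
+ Opting in follows the usual configuration pattern (a sketch)::
+
+ from tornado.netutil import Resolver
+ Resolver.configure('tornado.platform.caresresolver.CaresResolver')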
+ """ + def initialize(self): + self.io_loop = IOLoop.current() + self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb) + self.fds = {} + + def _sock_state_cb(self, fd, readable, writable): + state = ((IOLoop.READ if readable else 0) | + (IOLoop.WRITE if writable else 0)) + if not state: + self.io_loop.remove_handler(fd) + del self.fds[fd] + elif fd in self.fds: + self.io_loop.update_handler(fd, state) + self.fds[fd] = state + else: + self.io_loop.add_handler(fd, self._handle_events, state) + self.fds[fd] = state + + def _handle_events(self, fd, events): + read_fd = pycares.ARES_SOCKET_BAD + write_fd = pycares.ARES_SOCKET_BAD + if events & IOLoop.READ: + read_fd = fd + if events & IOLoop.WRITE: + write_fd = fd + self.channel.process_fd(read_fd, write_fd) + + @gen.coroutine + def resolve(self, host, port, family=0): + if is_valid_ip(host): + addresses = [host] + else: + # gethostbyname doesn't take callback as a kwarg + fut = Future() + self.channel.gethostbyname(host, family, + lambda result, error: fut.set_result((result, error))) + result, error = yield fut + if error: + raise IOError('C-Ares returned error %s: %s while resolving %s' % + (error, pycares.errno.strerror(error), host)) + addresses = result.addresses + addrinfo = [] + for address in addresses: + if '.' in address: + address_family = socket.AF_INET + elif ':' in address: + address_family = socket.AF_INET6 + else: + address_family = socket.AF_UNSPEC + if family != socket.AF_UNSPEC and family != address_family: + raise IOError('Requested socket family %d but got %d' % + (family, address_family)) + addrinfo.append((address_family, (address, port))) + raise gen.Return(addrinfo) diff --git a/server/www/packages/packages-windows/x86/tornado/platform/common.py b/server/www/packages/packages-windows/x86/tornado/platform/common.py new file mode 100644 index 0000000..b597748 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/platform/common.py @@ -0,0 +1,113 @@ +"""Lowest-common-denominator implementations of platform functionality.""" +from __future__ import absolute_import, division, print_function + +import errno +import socket +import time + +from tornado.platform import interface +from tornado.util import errno_from_exception + + +def try_close(f): + # Avoid issue #875 (race condition when using the file in another + # thread). + for i in range(10): + try: + f.close() + except IOError: + # Yield to another thread + time.sleep(1e-3) + else: + break + # Try a last time and let raise + f.close() + + +class Waker(interface.Waker): + """Create an OS independent asynchronous pipe. + + For use on platforms that don't have os.pipe() (or where pipes cannot + be passed to select()), but do have sockets. This includes Windows + and Jython. + """ + def __init__(self): + from .auto import set_close_exec + # Based on Zope select_trigger.py: + # https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py + + self.writer = socket.socket() + set_close_exec(self.writer.fileno()) + # Disable buffering -- pulling the trigger sends 1 byte, + # and we want that sent immediately, to wake up ASAP. + self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + + count = 0 + while 1: + count += 1 + # Bind to a local port; for efficiency, let the OS pick + # a free port for us. + # Unfortunately, stress tests showed that we may not + # be able to connect to that port ("Address already in + # use") despite that the OS picked it. 
This appears + # to be a race bug in the Windows socket implementation. + # So we loop until a connect() succeeds (almost always + # on the first try). See the long thread at + # http://mail.zope.org/pipermail/zope/2005-July/160433.html + # for hideous details. + a = socket.socket() + set_close_exec(a.fileno()) + a.bind(("127.0.0.1", 0)) + a.listen(1) + connect_address = a.getsockname() # assigned (host, port) pair + try: + self.writer.connect(connect_address) + break # success + except socket.error as detail: + if (not hasattr(errno, 'WSAEADDRINUSE') or + errno_from_exception(detail) != errno.WSAEADDRINUSE): + # "Address already in use" is the only error + # I've seen on two WinXP Pro SP2 boxes, under + # Pythons 2.3.5 and 2.4.1. + raise + # (10048, 'Address already in use') + # assert count <= 2 # never triggered in Tim's tests + if count >= 10: # I've never seen it go above 2 + a.close() + self.writer.close() + raise socket.error("Cannot bind trigger!") + # Close `a` and try again. Note: I originally put a short + # sleep() here, but it didn't appear to help or hurt. + a.close() + + self.reader, addr = a.accept() + set_close_exec(self.reader.fileno()) + self.reader.setblocking(0) + self.writer.setblocking(0) + a.close() + self.reader_fd = self.reader.fileno() + + def fileno(self): + return self.reader.fileno() + + def write_fileno(self): + return self.writer.fileno() + + def wake(self): + try: + self.writer.send(b"x") + except (IOError, socket.error, ValueError): + pass + + def consume(self): + try: + while True: + result = self.reader.recv(1024) + if not result: + break + except (IOError, socket.error): + pass + + def close(self): + self.reader.close() + try_close(self.writer) diff --git a/server/www/packages/packages-windows/x86/tornado/platform/epoll.py b/server/www/packages/packages-windows/x86/tornado/platform/epoll.py new file mode 100644 index 0000000..4e34617 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/platform/epoll.py @@ -0,0 +1,25 @@ +# +# Copyright 2012 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""EPoll-based IOLoop implementation for Linux systems.""" +from __future__ import absolute_import, division, print_function + +import select + +from tornado.ioloop import PollIOLoop + + +class EPollIOLoop(PollIOLoop): + def initialize(self, **kwargs): + super(EPollIOLoop, self).initialize(impl=select.epoll(), **kwargs) diff --git a/server/www/packages/packages-windows/x86/tornado/platform/interface.py b/server/www/packages/packages-windows/x86/tornado/platform/interface.py new file mode 100644 index 0000000..cac5326 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/platform/interface.py @@ -0,0 +1,66 @@ +# +# Copyright 2011 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Interfaces for platform-specific functionality.
+
+This module exists primarily for documentation purposes and as base classes
+for other tornado.platform modules. Most code should import the appropriate
+implementation from `tornado.platform.auto`.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+
+def set_close_exec(fd):
+ """Sets the close-on-exec bit (``FD_CLOEXEC``) for a file descriptor."""
+ raise NotImplementedError()
+
+
+class Waker(object):
+ """A socket-like object that can wake another thread from ``select()``.
+
+ The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to
+ its ``select`` (or ``epoll`` or ``kqueue``) calls. When another
+ thread wants to wake up the loop, it calls `wake`. Once it has woken
+ up, it will call `consume` to do any necessary per-wake cleanup. When
+ the ``IOLoop`` is closed, it closes its waker too.
+ """
+ def fileno(self):
+ """Returns the read file descriptor for this waker.
+
+ Must be suitable for use with ``select()`` or equivalent on the
+ local platform.
+ """
+ raise NotImplementedError()
+
+ def write_fileno(self):
+ """Returns the write file descriptor for this waker."""
+ raise NotImplementedError()
+
+ def wake(self):
+ """Triggers activity on the waker's file descriptor."""
+ raise NotImplementedError()
+
+ def consume(self):
+ """Called after the loop has woken up to do any necessary cleanup."""
+ raise NotImplementedError()
+
+ def close(self):
+ """Closes the waker's file descriptor(s)."""
+ raise NotImplementedError()
+
+
+def monotonic_time():
+ raise NotImplementedError() diff --git a/server/www/packages/packages-windows/x86/tornado/platform/kqueue.py b/server/www/packages/packages-windows/x86/tornado/platform/kqueue.py new file mode 100644 index 0000000..4e0aee0 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/platform/kqueue.py @@ -0,0 +1,90 @@ +#
+# Copyright 2012 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""KQueue-based IOLoop implementation for BSD/Mac systems.""" +from __future__ import absolute_import, division, print_function + +import select + +from tornado.ioloop import IOLoop, PollIOLoop + +assert hasattr(select, 'kqueue'), 'kqueue not supported' + + +class _KQueue(object): + """A kqueue-based event loop for BSD/Mac systems.""" + def __init__(self): + self._kqueue = select.kqueue() + self._active = {} + + def fileno(self): + return self._kqueue.fileno() + + def close(self): + self._kqueue.close() + + def register(self, fd, events): + if fd in self._active: + raise IOError("fd %s already registered" % fd) + self._control(fd, events, select.KQ_EV_ADD) + self._active[fd] = events + + def modify(self, fd, events): + self.unregister(fd) + self.register(fd, events) + + def unregister(self, fd): + events = self._active.pop(fd) + self._control(fd, events, select.KQ_EV_DELETE) + + def _control(self, fd, events, flags): + kevents = [] + if events & IOLoop.WRITE: + kevents.append(select.kevent( + fd, filter=select.KQ_FILTER_WRITE, flags=flags)) + if events & IOLoop.READ: + kevents.append(select.kevent( + fd, filter=select.KQ_FILTER_READ, flags=flags)) + # Even though control() takes a list, it seems to return EINVAL + # on Mac OS X (10.6) when there is more than one event in the list. + for kevent in kevents: + self._kqueue.control([kevent], 0) + + def poll(self, timeout): + kevents = self._kqueue.control(None, 1000, timeout) + events = {} + for kevent in kevents: + fd = kevent.ident + if kevent.filter == select.KQ_FILTER_READ: + events[fd] = events.get(fd, 0) | IOLoop.READ + if kevent.filter == select.KQ_FILTER_WRITE: + if kevent.flags & select.KQ_EV_EOF: + # If an asynchronous connection is refused, kqueue + # returns a write event with the EOF flag set. + # Turn this into an error for consistency with the + # other IOLoop implementations. + # Note that for read events, EOF may be returned before + # all data has been consumed from the socket buffer, + # so we only check for EOF on write events. + events[fd] = IOLoop.ERROR + else: + events[fd] = events.get(fd, 0) | IOLoop.WRITE + if kevent.flags & select.KQ_EV_ERROR: + events[fd] = events.get(fd, 0) | IOLoop.ERROR + return events.items() + + +class KQueueIOLoop(PollIOLoop): + def initialize(self, **kwargs): + super(KQueueIOLoop, self).initialize(impl=_KQueue(), **kwargs) diff --git a/server/www/packages/packages-windows/x86/tornado/platform/posix.py b/server/www/packages/packages-windows/x86/tornado/platform/posix.py new file mode 100644 index 0000000..6fe1fa8 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/platform/posix.py @@ -0,0 +1,69 @@ +# +# Copyright 2011 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Posix implementations of platform-specific functionality.""" + +from __future__ import absolute_import, division, print_function + +import fcntl +import os + +from tornado.platform import common, interface + + +def set_close_exec(fd): + flags = fcntl.fcntl(fd, fcntl.F_GETFD) + fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) + + +def _set_nonblocking(fd): + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) + + +class Waker(interface.Waker): + def __init__(self): + r, w = os.pipe() + _set_nonblocking(r) + _set_nonblocking(w) + set_close_exec(r) + set_close_exec(w) + self.reader = os.fdopen(r, "rb", 0) + self.writer = os.fdopen(w, "wb", 0) + + def fileno(self): + return self.reader.fileno() + + def write_fileno(self): + return self.writer.fileno() + + def wake(self): + try: + self.writer.write(b"x") + except (IOError, ValueError): + pass + + def consume(self): + try: + while True: + result = self.reader.read() + if not result: + break + except IOError: + pass + + def close(self): + self.reader.close() + common.try_close(self.writer) diff --git a/server/www/packages/packages-windows/x86/tornado/platform/select.py b/server/www/packages/packages-windows/x86/tornado/platform/select.py new file mode 100644 index 0000000..14e8a47 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/platform/select.py @@ -0,0 +1,75 @@ +# +# Copyright 2012 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Select-based IOLoop implementation. + +Used as a fallback for systems that don't support epoll or kqueue. +""" +from __future__ import absolute_import, division, print_function + +import select + +from tornado.ioloop import IOLoop, PollIOLoop + + +class _Select(object): + """A simple, select()-based IOLoop implementation for non-Linux systems""" + def __init__(self): + self.read_fds = set() + self.write_fds = set() + self.error_fds = set() + self.fd_sets = (self.read_fds, self.write_fds, self.error_fds) + + def close(self): + pass + + def register(self, fd, events): + if fd in self.read_fds or fd in self.write_fds or fd in self.error_fds: + raise IOError("fd %s already registered" % fd) + if events & IOLoop.READ: + self.read_fds.add(fd) + if events & IOLoop.WRITE: + self.write_fds.add(fd) + if events & IOLoop.ERROR: + self.error_fds.add(fd) + # Closed connections are reported as errors by epoll and kqueue, + # but as zero-byte reads by select, so when errors are requested + # we need to listen for both read and error. 
+ self.read_fds.add(fd)
+
+ def modify(self, fd, events):
+ self.unregister(fd)
+ self.register(fd, events)
+
+ def unregister(self, fd):
+ self.read_fds.discard(fd)
+ self.write_fds.discard(fd)
+ self.error_fds.discard(fd)
+
+ def poll(self, timeout):
+ readable, writeable, errors = select.select(
+ self.read_fds, self.write_fds, self.error_fds, timeout)
+ events = {}
+ for fd in readable:
+ events[fd] = events.get(fd, 0) | IOLoop.READ
+ for fd in writeable:
+ events[fd] = events.get(fd, 0) | IOLoop.WRITE
+ for fd in errors:
+ events[fd] = events.get(fd, 0) | IOLoop.ERROR
+ return events.items()
+
+
+class SelectIOLoop(PollIOLoop):
+ def initialize(self, **kwargs):
+ super(SelectIOLoop, self).initialize(impl=_Select(), **kwargs) diff --git a/server/www/packages/packages-windows/x86/tornado/platform/twisted.py b/server/www/packages/packages-windows/x86/tornado/platform/twisted.py new file mode 100644 index 0000000..b38a755 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/platform/twisted.py @@ -0,0 +1,609 @@ +# Author: Ovidiu Predescu
+# Date: July 2011
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Bridges between the Twisted reactor and Tornado IOLoop.
+
+This module lets you run applications and libraries written for
+Twisted in a Tornado application. It can be used in two modes,
+depending on which library's underlying event loop you want to use.
+
+This module has been tested with Twisted versions 11.0.0 and newer.
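+
+A hedged sketch of the two modes (the class docstrings below give the
+authoritative usage)::
+
+ # Mode 1: Tornado IOLoop underneath; Twisted code runs on TornadoReactor.
+ import tornado.platform.twisted
+ tornado.platform.twisted.install()
+ from twisted.internet import reactor # now the installed TornadoReactor
+
+ # Mode 2: Twisted reactor underneath; Tornado code runs on TwistedIOLoop.
+ # from tornado.platform.twisted import TwistedIOLoop
+ # TwistedIOLoop().install()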
+""" + +from __future__ import absolute_import, division, print_function + +import datetime +import functools +import numbers +import socket +import sys + +import twisted.internet.abstract # type: ignore +from twisted.internet.defer import Deferred # type: ignore +from twisted.internet.posixbase import PosixReactorBase # type: ignore +from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore # noqa: E501 +from twisted.python import failure, log # type: ignore +from twisted.internet import error # type: ignore +import twisted.names.cache # type: ignore +import twisted.names.client # type: ignore +import twisted.names.hosts # type: ignore +import twisted.names.resolve # type: ignore + +from zope.interface import implementer # type: ignore + +from tornado.concurrent import Future, future_set_exc_info +from tornado.escape import utf8 +from tornado import gen +import tornado.ioloop +from tornado.log import app_log +from tornado.netutil import Resolver +from tornado.stack_context import NullContext, wrap +from tornado.ioloop import IOLoop +from tornado.util import timedelta_to_seconds + + +@implementer(IDelayedCall) +class TornadoDelayedCall(object): + """DelayedCall object for Tornado.""" + def __init__(self, reactor, seconds, f, *args, **kw): + self._reactor = reactor + self._func = functools.partial(f, *args, **kw) + self._time = self._reactor.seconds() + seconds + self._timeout = self._reactor._io_loop.add_timeout(self._time, + self._called) + self._active = True + + def _called(self): + self._active = False + self._reactor._removeDelayedCall(self) + try: + self._func() + except: + app_log.error("_called caught exception", exc_info=True) + + def getTime(self): + return self._time + + def cancel(self): + self._active = False + self._reactor._io_loop.remove_timeout(self._timeout) + self._reactor._removeDelayedCall(self) + + def delay(self, seconds): + self._reactor._io_loop.remove_timeout(self._timeout) + self._time += seconds + self._timeout = self._reactor._io_loop.add_timeout(self._time, + self._called) + + def reset(self, seconds): + self._reactor._io_loop.remove_timeout(self._timeout) + self._time = self._reactor.seconds() + seconds + self._timeout = self._reactor._io_loop.add_timeout(self._time, + self._called) + + def active(self): + return self._active + + +@implementer(IReactorTime, IReactorFDSet) +class TornadoReactor(PosixReactorBase): + """Twisted reactor built on the Tornado IOLoop. + + `TornadoReactor` implements the Twisted reactor interface on top of + the Tornado IOLoop. To use it, simply call `install` at the beginning + of the application:: + + import tornado.platform.twisted + tornado.platform.twisted.install() + from twisted.internet import reactor + + When the app is ready to start, call ``IOLoop.current().start()`` + instead of ``reactor.run()``. + + It is also possible to create a non-global reactor by calling + ``tornado.platform.twisted.TornadoReactor()``. However, if + the `.IOLoop` and reactor are to be short-lived (such as those used in + unit tests), additional cleanup may be required. Specifically, it is + recommended to call:: + + reactor.fireSystemEvent('shutdown') + reactor.disconnectAll() + + before closing the `.IOLoop`. + + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + + .. deprecated:: 5.1 + + This class will be removed in Tornado 6.0. Use + ``twisted.internet.asyncioreactor.AsyncioSelectorReactor`` + instead. 
+ + """ + def __init__(self): + self._io_loop = tornado.ioloop.IOLoop.current() + self._readers = {} # map of reader objects to fd + self._writers = {} # map of writer objects to fd + self._fds = {} # a map of fd to a (reader, writer) tuple + self._delayedCalls = {} + PosixReactorBase.__init__(self) + self.addSystemEventTrigger('during', 'shutdown', self.crash) + + # IOLoop.start() bypasses some of the reactor initialization. + # Fire off the necessary events if they weren't already triggered + # by reactor.run(). + def start_if_necessary(): + if not self._started: + self.fireSystemEvent('startup') + self._io_loop.add_callback(start_if_necessary) + + # IReactorTime + def seconds(self): + return self._io_loop.time() + + def callLater(self, seconds, f, *args, **kw): + dc = TornadoDelayedCall(self, seconds, f, *args, **kw) + self._delayedCalls[dc] = True + return dc + + def getDelayedCalls(self): + return [x for x in self._delayedCalls if x._active] + + def _removeDelayedCall(self, dc): + if dc in self._delayedCalls: + del self._delayedCalls[dc] + + # IReactorThreads + def callFromThread(self, f, *args, **kw): + assert callable(f), "%s is not callable" % f + with NullContext(): + # This NullContext is mainly for an edge case when running + # TwistedIOLoop on top of a TornadoReactor. + # TwistedIOLoop.add_callback uses reactor.callFromThread and + # should not pick up additional StackContexts along the way. + self._io_loop.add_callback(f, *args, **kw) + + # We don't need the waker code from the super class, Tornado uses + # its own waker. + def installWaker(self): + pass + + def wakeUp(self): + pass + + # IReactorFDSet + def _invoke_callback(self, fd, events): + if fd not in self._fds: + return + (reader, writer) = self._fds[fd] + if reader: + err = None + if reader.fileno() == -1: + err = error.ConnectionLost() + elif events & IOLoop.READ: + err = log.callWithLogger(reader, reader.doRead) + if err is None and events & IOLoop.ERROR: + err = error.ConnectionLost() + if err is not None: + self.removeReader(reader) + reader.readConnectionLost(failure.Failure(err)) + if writer: + err = None + if writer.fileno() == -1: + err = error.ConnectionLost() + elif events & IOLoop.WRITE: + err = log.callWithLogger(writer, writer.doWrite) + if err is None and events & IOLoop.ERROR: + err = error.ConnectionLost() + if err is not None: + self.removeWriter(writer) + writer.writeConnectionLost(failure.Failure(err)) + + def addReader(self, reader): + if reader in self._readers: + # Don't add the reader if it's already there + return + fd = reader.fileno() + self._readers[reader] = fd + if fd in self._fds: + (_, writer) = self._fds[fd] + self._fds[fd] = (reader, writer) + if writer: + # We already registered this fd for write events, + # update it for read events as well. + self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE) + else: + with NullContext(): + self._fds[fd] = (reader, None) + self._io_loop.add_handler(fd, self._invoke_callback, + IOLoop.READ) + + def addWriter(self, writer): + if writer in self._writers: + return + fd = writer.fileno() + self._writers[writer] = fd + if fd in self._fds: + (reader, _) = self._fds[fd] + self._fds[fd] = (reader, writer) + if reader: + # We already registered this fd for read events, + # update it for write events as well. 
+ self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE) + else: + with NullContext(): + self._fds[fd] = (None, writer) + self._io_loop.add_handler(fd, self._invoke_callback, + IOLoop.WRITE) + + def removeReader(self, reader): + if reader in self._readers: + fd = self._readers.pop(reader) + (_, writer) = self._fds[fd] + if writer: + # We have a writer so we need to update the IOLoop for + # write events only. + self._fds[fd] = (None, writer) + self._io_loop.update_handler(fd, IOLoop.WRITE) + else: + # Since we have no writer registered, we remove the + # entry from _fds and unregister the handler from the + # IOLoop + del self._fds[fd] + self._io_loop.remove_handler(fd) + + def removeWriter(self, writer): + if writer in self._writers: + fd = self._writers.pop(writer) + (reader, _) = self._fds[fd] + if reader: + # We have a reader so we need to update the IOLoop for + # read events only. + self._fds[fd] = (reader, None) + self._io_loop.update_handler(fd, IOLoop.READ) + else: + # Since we have no reader registered, we remove the + # entry from the _fds and unregister the handler from + # the IOLoop. + del self._fds[fd] + self._io_loop.remove_handler(fd) + + def removeAll(self): + return self._removeAll(self._readers, self._writers) + + def getReaders(self): + return self._readers.keys() + + def getWriters(self): + return self._writers.keys() + + # The following functions are mainly used in twisted-style test cases; + # it is expected that most users of the TornadoReactor will call + # IOLoop.start() instead of Reactor.run(). + def stop(self): + PosixReactorBase.stop(self) + fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown") + self._io_loop.add_callback(fire_shutdown) + + def crash(self): + PosixReactorBase.crash(self) + self._io_loop.stop() + + def doIteration(self, delay): + raise NotImplementedError("doIteration") + + def mainLoop(self): + # Since this class is intended to be used in applications + # where the top-level event loop is ``io_loop.start()`` rather + # than ``reactor.run()``, it is implemented a little + # differently than other Twisted reactors. We override + # ``mainLoop`` instead of ``doIteration`` and must implement + # timed call functionality on top of `.IOLoop.add_timeout` + # rather than using the implementation in + # ``PosixReactorBase``. + self._io_loop.start() + + +class _TestReactor(TornadoReactor): + """Subclass of TornadoReactor for use in unittests. + + This can't go in the test.py file because of import-order dependencies + with the Twisted reactor test builder. + """ + def __init__(self): + # always use a new ioloop + IOLoop.clear_current() + IOLoop(make_current=True) + super(_TestReactor, self).__init__() + IOLoop.clear_current() + + def listenTCP(self, port, factory, backlog=50, interface=''): + # default to localhost to avoid firewall prompts on the mac + if not interface: + interface = '127.0.0.1' + return super(_TestReactor, self).listenTCP( + port, factory, backlog=backlog, interface=interface) + + def listenUDP(self, port, protocol, interface='', maxPacketSize=8192): + if not interface: + interface = '127.0.0.1' + return super(_TestReactor, self).listenUDP( + port, protocol, interface=interface, maxPacketSize=maxPacketSize) + + +def install(): + """Install this package as the default Twisted reactor. + + ``install()`` must be called very early in the startup process, + before most other twisted-related imports. 
Conversely, because it
+ initializes the `.IOLoop`, it cannot be called before
+ `.fork_processes` or multi-process `~.TCPServer.start`. These
+ conflicting requirements make it difficult to use `.TornadoReactor`
+ in multi-process mode, and an external process manager such as
+ ``supervisord`` is recommended instead.
+
+ .. versionchanged:: 5.0
+ The ``io_loop`` argument (deprecated since version 4.1) has been removed.
+
+ .. deprecated:: 5.1
+
+ This function will be removed in Tornado 6.0. Use
+ ``twisted.internet.asyncioreactor.install`` instead.
+ """
+ reactor = TornadoReactor()
+ from twisted.internet.main import installReactor # type: ignore
+ installReactor(reactor)
+ return reactor
+
+
+@implementer(IReadDescriptor, IWriteDescriptor)
+class _FD(object):
+ def __init__(self, fd, fileobj, handler):
+ self.fd = fd
+ self.fileobj = fileobj
+ self.handler = handler
+ self.reading = False
+ self.writing = False
+ self.lost = False
+
+ def fileno(self):
+ return self.fd
+
+ def doRead(self):
+ if not self.lost:
+ self.handler(self.fileobj, tornado.ioloop.IOLoop.READ)
+
+ def doWrite(self):
+ if not self.lost:
+ self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE)
+
+ def connectionLost(self, reason):
+ if not self.lost:
+ self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
+ self.lost = True
+
+ writeConnectionLost = readConnectionLost = connectionLost
+
+ def logPrefix(self):
+ return ''
+
+
+class TwistedIOLoop(tornado.ioloop.IOLoop):
+ """IOLoop implementation that runs on Twisted.
+
+ `TwistedIOLoop` implements the Tornado IOLoop interface on top of
+ the Twisted reactor. Recommended usage::
+
+ from tornado.platform.twisted import TwistedIOLoop
+ from twisted.internet import reactor
+ TwistedIOLoop().install()
+ # Set up your tornado application as usual using `IOLoop.instance`
+ reactor.run()
+
+ Uses the global Twisted reactor by default. To create multiple
+ ``TwistedIOLoops`` in the same process, you must pass a unique reactor
+ when constructing each one.
+
+ Not compatible with `tornado.process.Subprocess.set_exit_callback`
+ because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict
+ with each other.
+
+ See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
+ installing alternative IOLoops.
+
+ .. deprecated:: 5.1
+
+ The `asyncio` event loop will be the only available implementation in
+ Tornado 6.0.
+ """ + def initialize(self, reactor=None, **kwargs): + super(TwistedIOLoop, self).initialize(**kwargs) + if reactor is None: + import twisted.internet.reactor # type: ignore + reactor = twisted.internet.reactor + self.reactor = reactor + self.fds = {} + + def close(self, all_fds=False): + fds = self.fds + self.reactor.removeAll() + for c in self.reactor.getDelayedCalls(): + c.cancel() + if all_fds: + for fd in fds.values(): + self.close_fd(fd.fileobj) + + def add_handler(self, fd, handler, events): + if fd in self.fds: + raise ValueError('fd %s added twice' % fd) + fd, fileobj = self.split_fd(fd) + self.fds[fd] = _FD(fd, fileobj, wrap(handler)) + if events & tornado.ioloop.IOLoop.READ: + self.fds[fd].reading = True + self.reactor.addReader(self.fds[fd]) + if events & tornado.ioloop.IOLoop.WRITE: + self.fds[fd].writing = True + self.reactor.addWriter(self.fds[fd]) + + def update_handler(self, fd, events): + fd, fileobj = self.split_fd(fd) + if events & tornado.ioloop.IOLoop.READ: + if not self.fds[fd].reading: + self.fds[fd].reading = True + self.reactor.addReader(self.fds[fd]) + else: + if self.fds[fd].reading: + self.fds[fd].reading = False + self.reactor.removeReader(self.fds[fd]) + if events & tornado.ioloop.IOLoop.WRITE: + if not self.fds[fd].writing: + self.fds[fd].writing = True + self.reactor.addWriter(self.fds[fd]) + else: + if self.fds[fd].writing: + self.fds[fd].writing = False + self.reactor.removeWriter(self.fds[fd]) + + def remove_handler(self, fd): + fd, fileobj = self.split_fd(fd) + if fd not in self.fds: + return + self.fds[fd].lost = True + if self.fds[fd].reading: + self.reactor.removeReader(self.fds[fd]) + if self.fds[fd].writing: + self.reactor.removeWriter(self.fds[fd]) + del self.fds[fd] + + def start(self): + old_current = IOLoop.current(instance=False) + try: + self._setup_logging() + self.make_current() + self.reactor.run() + finally: + if old_current is None: + IOLoop.clear_current() + else: + old_current.make_current() + + def stop(self): + self.reactor.crash() + + def add_timeout(self, deadline, callback, *args, **kwargs): + # This method could be simplified (since tornado 4.0) by + # overriding call_at instead of add_timeout, but we leave it + # for now as a test of backwards-compatibility. + if isinstance(deadline, numbers.Real): + delay = max(deadline - self.time(), 0) + elif isinstance(deadline, datetime.timedelta): + delay = timedelta_to_seconds(deadline) + else: + raise TypeError("Unsupported deadline %r") + return self.reactor.callLater( + delay, self._run_callback, + functools.partial(wrap(callback), *args, **kwargs)) + + def remove_timeout(self, timeout): + if timeout.active(): + timeout.cancel() + + def add_callback(self, callback, *args, **kwargs): + self.reactor.callFromThread( + self._run_callback, + functools.partial(wrap(callback), *args, **kwargs)) + + def add_callback_from_signal(self, callback, *args, **kwargs): + self.add_callback(callback, *args, **kwargs) + + +class TwistedResolver(Resolver): + """Twisted-based asynchronous resolver. + + This is a non-blocking and non-threaded resolver. It is + recommended only when threads cannot be used, since it has + limitations compared to the standard ``getaddrinfo``-based + `~tornado.netutil.Resolver` and + `~tornado.netutil.DefaultExecutorResolver`. Specifically, it returns at + most one result, and arguments other than ``host`` and ``family`` + are ignored. It may fail to resolve when ``family`` is not + ``socket.AF_UNSPEC``. + + Requires Twisted 12.1 or newer. + + .. 
versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + """ + def initialize(self): + # partial copy of twisted.names.client.createResolver, which doesn't + # allow for a reactor to be passed in. + self.reactor = tornado.platform.twisted.TornadoReactor() + + host_resolver = twisted.names.hosts.Resolver('/etc/hosts') + cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor) + real_resolver = twisted.names.client.Resolver('/etc/resolv.conf', + reactor=self.reactor) + self.resolver = twisted.names.resolve.ResolverChain( + [host_resolver, cache_resolver, real_resolver]) + + @gen.coroutine + def resolve(self, host, port, family=0): + # getHostByName doesn't accept IP addresses, so if the input + # looks like an IP address just return it immediately. + if twisted.internet.abstract.isIPAddress(host): + resolved = host + resolved_family = socket.AF_INET + elif twisted.internet.abstract.isIPv6Address(host): + resolved = host + resolved_family = socket.AF_INET6 + else: + deferred = self.resolver.getHostByName(utf8(host)) + fut = Future() + deferred.addBoth(fut.set_result) + resolved = yield fut + if isinstance(resolved, failure.Failure): + try: + resolved.raiseException() + except twisted.names.error.DomainError as e: + raise IOError(e) + elif twisted.internet.abstract.isIPAddress(resolved): + resolved_family = socket.AF_INET + elif twisted.internet.abstract.isIPv6Address(resolved): + resolved_family = socket.AF_INET6 + else: + resolved_family = socket.AF_UNSPEC + if family != socket.AF_UNSPEC and family != resolved_family: + raise Exception('Requested socket family %d but got %d' % + (family, resolved_family)) + result = [ + (resolved_family, (resolved, port)), + ] + raise gen.Return(result) + + +if hasattr(gen.convert_yielded, 'register'): + @gen.convert_yielded.register(Deferred) # type: ignore + def _(d): + f = Future() + + def errback(failure): + try: + failure.raiseException() + # Should never happen, but just in case + raise Exception("errback called without error") + except: + future_set_exc_info(f, sys.exc_info()) + d.addCallbacks(f.set_result, errback) + return f diff --git a/server/www/packages/packages-windows/x86/tornado/platform/windows.py b/server/www/packages/packages-windows/x86/tornado/platform/windows.py new file mode 100644 index 0000000..4127700 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/platform/windows.py @@ -0,0 +1,20 @@ +# NOTE: win32 support is currently experimental, and not recommended +# for production use. 
+ + +from __future__ import absolute_import, division, print_function +import ctypes # type: ignore +import ctypes.wintypes # type: ignore + +# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx +SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation +SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD) # noqa: E501 +SetHandleInformation.restype = ctypes.wintypes.BOOL + +HANDLE_FLAG_INHERIT = 0x00000001 + + +def set_close_exec(fd): + success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0) + if not success: + raise ctypes.WinError() diff --git a/server/www/packages/packages-windows/x86/tornado/process.py b/server/www/packages/packages-windows/x86/tornado/process.py new file mode 100644 index 0000000..122fd7e --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/process.py @@ -0,0 +1,361 @@ +# +# Copyright 2011 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Utilities for working with multiple processes, including both forking +the server into multiple processes and managing subprocesses. +""" + +from __future__ import absolute_import, division, print_function + +import errno +import os +import signal +import subprocess +import sys +import time + +from binascii import hexlify + +from tornado.concurrent import Future, future_set_result_unless_cancelled +from tornado import ioloop +from tornado.iostream import PipeIOStream +from tornado.log import gen_log +from tornado.platform.auto import set_close_exec +from tornado import stack_context +from tornado.util import errno_from_exception, PY3 + +try: + import multiprocessing +except ImportError: + # Multiprocessing is not available on Google App Engine. + multiprocessing = None + +if PY3: + long = int + +# Re-export this exception for convenience. +try: + CalledProcessError = subprocess.CalledProcessError +except AttributeError: + # The subprocess module exists in Google App Engine, but is empty. + # This module isn't very useful in that case, but it should + # at least be importable. + if 'APPENGINE_RUNTIME' not in os.environ: + raise + + +def cpu_count(): + """Returns the number of processors on this machine.""" + if multiprocessing is None: + return 1 + try: + return multiprocessing.cpu_count() + except NotImplementedError: + pass + try: + return os.sysconf("SC_NPROCESSORS_CONF") + except (AttributeError, ValueError): + pass + gen_log.error("Could not detect number of processors; assuming 1") + return 1 + + +def _reseed_random(): + if 'random' not in sys.modules: + return + import random + # If os.urandom is available, this method does the same thing as + # random.seed (at least as of python 2.6). If os.urandom is not + # available, we mix in the pid in addition to a timestamp. 
+ try: + seed = long(hexlify(os.urandom(16)), 16) + except NotImplementedError: + seed = int(time.time() * 1000) ^ os.getpid() + random.seed(seed) + + +def _pipe_cloexec(): + r, w = os.pipe() + set_close_exec(r) + set_close_exec(w) + return r, w + + +_task_id = None + + +def fork_processes(num_processes, max_restarts=100): + """Starts multiple worker processes. + + If ``num_processes`` is None or <= 0, we detect the number of cores + available on this machine and fork that number of child + processes. If ``num_processes`` is given and > 0, we fork that + specific number of sub-processes. + + Since we use processes and not threads, there is no shared memory + between any server code. + + Note that multiple processes are not compatible with the autoreload + module (or the ``autoreload=True`` option to `tornado.web.Application` + which defaults to True when ``debug=True``). + When using multiple processes, no IOLoops can be created or + referenced until after the call to ``fork_processes``. + + In each child process, ``fork_processes`` returns its *task id*, a + number between 0 and ``num_processes``. Processes that exit + abnormally (due to a signal or non-zero exit status) are restarted + with the same id (up to ``max_restarts`` times). In the parent + process, ``fork_processes`` returns None if all child processes + have exited normally, but will otherwise only exit by throwing an + exception. + """ + global _task_id + assert _task_id is None + if num_processes is None or num_processes <= 0: + num_processes = cpu_count() + gen_log.info("Starting %d processes", num_processes) + children = {} + + def start_child(i): + pid = os.fork() + if pid == 0: + # child process + _reseed_random() + global _task_id + _task_id = i + return i + else: + children[pid] = i + return None + + for i in range(num_processes): + id = start_child(i) + if id is not None: + return id + num_restarts = 0 + while children: + try: + pid, status = os.wait() + except OSError as e: + if errno_from_exception(e) == errno.EINTR: + continue + raise + if pid not in children: + continue + id = children.pop(pid) + if os.WIFSIGNALED(status): + gen_log.warning("child %d (pid %d) killed by signal %d, restarting", + id, pid, os.WTERMSIG(status)) + elif os.WEXITSTATUS(status) != 0: + gen_log.warning("child %d (pid %d) exited with status %d, restarting", + id, pid, os.WEXITSTATUS(status)) + else: + gen_log.info("child %d (pid %d) exited normally", id, pid) + continue + num_restarts += 1 + if num_restarts > max_restarts: + raise RuntimeError("Too many child restarts, giving up") + new_id = start_child(id) + if new_id is not None: + return new_id + # All child processes exited cleanly, so exit the master process + # instead of just returning to right after the call to + # fork_processes (which will probably just start up another IOLoop + # unless the caller checks the return value). + sys.exit(0) + + +def task_id(): + """Returns the current task id, if any. + + Returns None if this process was not created by `fork_processes`. + """ + global _task_id + return _task_id + + +class Subprocess(object): + """Wraps ``subprocess.Popen`` with IOStream support. + + The constructor is the same as ``subprocess.Popen`` with the following + additions: + + * ``stdin``, ``stdout``, and ``stderr`` may have the value + ``tornado.process.Subprocess.STREAM``, which will make the corresponding + attribute of the resulting Subprocess a `.PipeIOStream`. If this option + is used, the caller is responsible for closing the streams when done + with them. 
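+
+ A minimal ``STREAM`` sketch (hedged: assumes a Unix-like platform and a
+ `tornado.gen.coroutine` context)::
+
+ proc = Subprocess(['echo', 'hello'], stdout=Subprocess.STREAM)
+ output = yield proc.stdout.read_until_close()
+ ret = yield proc.wait_for_exit()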
+ + The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and + ``wait_for_exit`` methods do not work on Windows. There is + therefore no reason to use this class instead of + ``subprocess.Popen`` on that platform. + + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + + """ + STREAM = object() + + _initialized = False + _waiting = {} # type: ignore + + def __init__(self, *args, **kwargs): + self.io_loop = ioloop.IOLoop.current() + # All FDs we create should be closed on error; those in to_close + # should be closed in the parent process on success. + pipe_fds = [] + to_close = [] + if kwargs.get('stdin') is Subprocess.STREAM: + in_r, in_w = _pipe_cloexec() + kwargs['stdin'] = in_r + pipe_fds.extend((in_r, in_w)) + to_close.append(in_r) + self.stdin = PipeIOStream(in_w) + if kwargs.get('stdout') is Subprocess.STREAM: + out_r, out_w = _pipe_cloexec() + kwargs['stdout'] = out_w + pipe_fds.extend((out_r, out_w)) + to_close.append(out_w) + self.stdout = PipeIOStream(out_r) + if kwargs.get('stderr') is Subprocess.STREAM: + err_r, err_w = _pipe_cloexec() + kwargs['stderr'] = err_w + pipe_fds.extend((err_r, err_w)) + to_close.append(err_w) + self.stderr = PipeIOStream(err_r) + try: + self.proc = subprocess.Popen(*args, **kwargs) + except: + for fd in pipe_fds: + os.close(fd) + raise + for fd in to_close: + os.close(fd) + for attr in ['stdin', 'stdout', 'stderr', 'pid']: + if not hasattr(self, attr): # don't clobber streams set above + setattr(self, attr, getattr(self.proc, attr)) + self._exit_callback = None + self.returncode = None + + def set_exit_callback(self, callback): + """Runs ``callback`` when this process exits. + + The callback takes one argument, the return code of the process. + + This method uses a ``SIGCHLD`` handler, which is a global setting + and may conflict if you have other libraries trying to handle the + same signal. If you are using more than one ``IOLoop`` it may + be necessary to call `Subprocess.initialize` first to designate + one ``IOLoop`` to run the signal handlers. + + In many cases a close callback on the stdout or stderr streams + can be used as an alternative to an exit callback if the + signal handler is causing a problem. + """ + self._exit_callback = stack_context.wrap(callback) + Subprocess.initialize() + Subprocess._waiting[self.pid] = self + Subprocess._try_cleanup_process(self.pid) + + def wait_for_exit(self, raise_error=True): + """Returns a `.Future` which resolves when the process exits. + + Usage:: + + ret = yield proc.wait_for_exit() + + This is a coroutine-friendly alternative to `set_exit_callback` + (and a replacement for the blocking `subprocess.Popen.wait`). + + By default, raises `subprocess.CalledProcessError` if the process + has a non-zero exit status. Use ``wait_for_exit(raise_error=False)`` + to suppress this behavior and return the exit status without raising. + + .. versionadded:: 4.2 + """ + future = Future() + + def callback(ret): + if ret != 0 and raise_error: + # Unfortunately we don't have the original args any more. + future.set_exception(CalledProcessError(ret, None)) + else: + future_set_result_unless_cancelled(future, ret) + self.set_exit_callback(callback) + return future + + @classmethod + def initialize(cls): + """Initializes the ``SIGCHLD`` handler. + + The signal handler is run on an `.IOLoop` to avoid locking issues. 
+ Note that the `.IOLoop` used for signal handling need not be the
+ same one used by individual Subprocess objects (as long as the
+ ``IOLoops`` are each running in separate threads).
+
+ .. versionchanged:: 5.0
+ The ``io_loop`` argument (deprecated since version 4.1) has been
+ removed.
+ """
+ if cls._initialized:
+ return
+ io_loop = ioloop.IOLoop.current()
+ cls._old_sigchld = signal.signal(
+ signal.SIGCHLD,
+ lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))
+ cls._initialized = True
+
+ @classmethod
+ def uninitialize(cls):
+ """Removes the ``SIGCHLD`` handler."""
+ if not cls._initialized:
+ return
+ signal.signal(signal.SIGCHLD, cls._old_sigchld)
+ cls._initialized = False
+
+ @classmethod
+ def _cleanup(cls):
+ for pid in list(cls._waiting.keys()): # make a copy
+ cls._try_cleanup_process(pid)
+
+ @classmethod
+ def _try_cleanup_process(cls, pid):
+ try:
+ ret_pid, status = os.waitpid(pid, os.WNOHANG)
+ except OSError as e:
+ if errno_from_exception(e) == errno.ECHILD:
+ return
+ if ret_pid == 0:
+ return
+ assert ret_pid == pid
+ subproc = cls._waiting.pop(pid)
+ subproc.io_loop.add_callback_from_signal(
+ subproc._set_returncode, status)
+
+ def _set_returncode(self, status):
+ if os.WIFSIGNALED(status):
+ self.returncode = -os.WTERMSIG(status)
+ else:
+ assert os.WIFEXITED(status)
+ self.returncode = os.WEXITSTATUS(status)
+ # We've taken over wait() duty from the subprocess.Popen
+ # object. If we don't inform it of the process's return code,
+ # it will log a warning at destruction in python 3.6+.
+ self.proc.returncode = self.returncode
+ if self._exit_callback:
+ callback = self._exit_callback
+ self._exit_callback = None
+ callback(self.returncode) diff --git a/server/www/packages/packages-windows/x86/tornado/queues.py b/server/www/packages/packages-windows/x86/tornado/queues.py new file mode 100644 index 0000000..7cb96bf --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/queues.py @@ -0,0 +1,379 @@ +# Copyright 2015 The Tornado Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Asynchronous queues for coroutines. These classes are very similar
+to those provided in the standard library's `asyncio package
+<https://docs.python.org/3/library/asyncio-queue.html>`_.
+
+.. warning::
+
+ Unlike the standard library's `queue` module, the classes defined here
+ are *not* thread-safe. To use these queues from another thread,
+ use `.IOLoop.add_callback` to transfer control to the `.IOLoop` thread
+ before calling any queue methods.
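+
+ A hedged sketch of that hand-off (``q`` and ``io_loop`` are assumed to
+ live on the `.IOLoop` thread)::
+
+ # Called from a non-IOLoop thread; put_nowait itself then runs
+ # on the IOLoop thread.
+ io_loop.add_callback(q.put_nowait, item)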
+ +""" + +from __future__ import absolute_import, division, print_function + +import collections +import heapq + +from tornado import gen, ioloop +from tornado.concurrent import Future, future_set_result_unless_cancelled +from tornado.locks import Event + +__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty'] + + +class QueueEmpty(Exception): + """Raised by `.Queue.get_nowait` when the queue has no items.""" + pass + + +class QueueFull(Exception): + """Raised by `.Queue.put_nowait` when a queue is at its maximum size.""" + pass + + +def _set_timeout(future, timeout): + if timeout: + def on_timeout(): + if not future.done(): + future.set_exception(gen.TimeoutError()) + io_loop = ioloop.IOLoop.current() + timeout_handle = io_loop.add_timeout(timeout, on_timeout) + future.add_done_callback( + lambda _: io_loop.remove_timeout(timeout_handle)) + + +class _QueueIterator(object): + def __init__(self, q): + self.q = q + + def __anext__(self): + return self.q.get() + + +class Queue(object): + """Coordinate producer and consumer coroutines. + + If maxsize is 0 (the default) the queue size is unbounded. + + .. testcode:: + + from tornado import gen + from tornado.ioloop import IOLoop + from tornado.queues import Queue + + q = Queue(maxsize=2) + + async def consumer(): + async for item in q: + try: + print('Doing work on %s' % item) + await gen.sleep(0.01) + finally: + q.task_done() + + async def producer(): + for item in range(5): + await q.put(item) + print('Put %s' % item) + + async def main(): + # Start consumer without waiting (since it never finishes). + IOLoop.current().spawn_callback(consumer) + await producer() # Wait for producer to put all tasks. + await q.join() # Wait for consumer to finish all tasks. + print('Done') + + IOLoop.current().run_sync(main) + + .. testoutput:: + + Put 0 + Put 1 + Doing work on 0 + Put 2 + Doing work on 1 + Put 3 + Doing work on 2 + Put 4 + Doing work on 3 + Doing work on 4 + Done + + + In versions of Python without native coroutines (before 3.5), + ``consumer()`` could be written as:: + + @gen.coroutine + def consumer(): + while True: + item = yield q.get() + try: + print('Doing work on %s' % item) + yield gen.sleep(0.01) + finally: + q.task_done() + + .. versionchanged:: 4.3 + Added ``async for`` support in Python 3.5. + + """ + def __init__(self, maxsize=0): + if maxsize is None: + raise TypeError("maxsize can't be None") + + if maxsize < 0: + raise ValueError("maxsize can't be negative") + + self._maxsize = maxsize + self._init() + self._getters = collections.deque([]) # Futures. + self._putters = collections.deque([]) # Pairs of (item, Future). + self._unfinished_tasks = 0 + self._finished = Event() + self._finished.set() + + @property + def maxsize(self): + """Number of items allowed in the queue.""" + return self._maxsize + + def qsize(self): + """Number of items in the queue.""" + return len(self._queue) + + def empty(self): + return not self._queue + + def full(self): + if self.maxsize == 0: + return False + else: + return self.qsize() >= self.maxsize + + def put(self, item, timeout=None): + """Put an item into the queue, perhaps waiting until there is room. + + Returns a Future, which raises `tornado.util.TimeoutError` after a + timeout. + + ``timeout`` may be a number denoting a time (on the same + scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a + `datetime.timedelta` object for a deadline relative to the + current time. 
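+
+ A hedged example of a relative timeout (coroutine context assumed)::
+
+ import datetime
+ yield q.put(item, timeout=datetime.timedelta(seconds=1))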
+ """ + future = Future() + try: + self.put_nowait(item) + except QueueFull: + self._putters.append((item, future)) + _set_timeout(future, timeout) + else: + future.set_result(None) + return future + + def put_nowait(self, item): + """Put an item into the queue without blocking. + + If no free slot is immediately available, raise `QueueFull`. + """ + self._consume_expired() + if self._getters: + assert self.empty(), "queue non-empty, why are getters waiting?" + getter = self._getters.popleft() + self.__put_internal(item) + future_set_result_unless_cancelled(getter, self._get()) + elif self.full(): + raise QueueFull + else: + self.__put_internal(item) + + def get(self, timeout=None): + """Remove and return an item from the queue. + + Returns a Future which resolves once an item is available, or raises + `tornado.util.TimeoutError` after a timeout. + + ``timeout`` may be a number denoting a time (on the same + scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a + `datetime.timedelta` object for a deadline relative to the + current time. + """ + future = Future() + try: + future.set_result(self.get_nowait()) + except QueueEmpty: + self._getters.append(future) + _set_timeout(future, timeout) + return future + + def get_nowait(self): + """Remove and return an item from the queue without blocking. + + Return an item if one is immediately available, else raise + `QueueEmpty`. + """ + self._consume_expired() + if self._putters: + assert self.full(), "queue not full, why are putters waiting?" + item, putter = self._putters.popleft() + self.__put_internal(item) + future_set_result_unless_cancelled(putter, None) + return self._get() + elif self.qsize(): + return self._get() + else: + raise QueueEmpty + + def task_done(self): + """Indicate that a formerly enqueued task is complete. + + Used by queue consumers. For each `.get` used to fetch a task, a + subsequent call to `.task_done` tells the queue that the processing + on the task is complete. + + If a `.join` is blocking, it resumes when all items have been + processed; that is, when every `.put` is matched by a `.task_done`. + + Raises `ValueError` if called more times than `.put`. + """ + if self._unfinished_tasks <= 0: + raise ValueError('task_done() called too many times') + self._unfinished_tasks -= 1 + if self._unfinished_tasks == 0: + self._finished.set() + + def join(self, timeout=None): + """Block until all items in the queue are processed. + + Returns a Future, which raises `tornado.util.TimeoutError` after a + timeout. + """ + return self._finished.wait(timeout) + + def __aiter__(self): + return _QueueIterator(self) + + # These three are overridable in subclasses. + def _init(self): + self._queue = collections.deque() + + def _get(self): + return self._queue.popleft() + + def _put(self, item): + self._queue.append(item) + # End of the overridable methods. + + def __put_internal(self, item): + self._unfinished_tasks += 1 + self._finished.clear() + self._put(item) + + def _consume_expired(self): + # Remove timed-out waiters. 
+ while self._putters and self._putters[0][1].done(): + self._putters.popleft() + + while self._getters and self._getters[0].done(): + self._getters.popleft() + + def __repr__(self): + return '<%s at %s %s>' % ( + type(self).__name__, hex(id(self)), self._format()) + + def __str__(self): + return '<%s %s>' % (type(self).__name__, self._format()) + + def _format(self): + result = 'maxsize=%r' % (self.maxsize, ) + if getattr(self, '_queue', None): + result += ' queue=%r' % self._queue + if self._getters: + result += ' getters[%s]' % len(self._getters) + if self._putters: + result += ' putters[%s]' % len(self._putters) + if self._unfinished_tasks: + result += ' tasks=%s' % self._unfinished_tasks + return result + + +class PriorityQueue(Queue): + """A `.Queue` that retrieves entries in priority order, lowest first. + + Entries are typically tuples like ``(priority number, data)``. + + .. testcode:: + + from tornado.queues import PriorityQueue + + q = PriorityQueue() + q.put((1, 'medium-priority item')) + q.put((0, 'high-priority item')) + q.put((10, 'low-priority item')) + + print(q.get_nowait()) + print(q.get_nowait()) + print(q.get_nowait()) + + .. testoutput:: + + (0, 'high-priority item') + (1, 'medium-priority item') + (10, 'low-priority item') + """ + def _init(self): + self._queue = [] + + def _put(self, item): + heapq.heappush(self._queue, item) + + def _get(self): + return heapq.heappop(self._queue) + + +class LifoQueue(Queue): + """A `.Queue` that retrieves the most recently put items first. + + .. testcode:: + + from tornado.queues import LifoQueue + + q = LifoQueue() + q.put(3) + q.put(2) + q.put(1) + + print(q.get_nowait()) + print(q.get_nowait()) + print(q.get_nowait()) + + .. testoutput:: + + 1 + 2 + 3 + """ + def _init(self): + self._queue = [] + + def _put(self, item): + self._queue.append(item) + + def _get(self): + return self._queue.pop() diff --git a/server/www/packages/packages-windows/x86/tornado/routing.py b/server/www/packages/packages-windows/x86/tornado/routing.py new file mode 100644 index 0000000..e56d1a7 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/routing.py @@ -0,0 +1,641 @@ +# Copyright 2015 The Tornado Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Flexible routing implementation. + +Tornado routes HTTP requests to appropriate handlers using `Router` +class implementations. The `tornado.web.Application` class is a +`Router` implementation and may be used directly, or the classes in +this module may be used for additional flexibility. The `RuleRouter` +class can match on more criteria than `.Application`, or the `Router` +interface can be subclassed for maximum customization. + +`Router` interface extends `~.httputil.HTTPServerConnectionDelegate` +to provide additional routing capabilities. This also means that any +`Router` implementation can be used directly as a ``request_callback`` +for `~.httpserver.HTTPServer` constructor. 
+ +`Router` subclass must implement a ``find_handler`` method to provide +a suitable `~.httputil.HTTPMessageDelegate` instance to handle the +request: + +.. code-block:: python + + class CustomRouter(Router): + def find_handler(self, request, **kwargs): + # some routing logic providing a suitable HTTPMessageDelegate instance + return MessageDelegate(request.connection) + + class MessageDelegate(HTTPMessageDelegate): + def __init__(self, connection): + self.connection = connection + + def finish(self): + self.connection.write_headers( + ResponseStartLine("HTTP/1.1", 200, "OK"), + HTTPHeaders({"Content-Length": "2"}), + b"OK") + self.connection.finish() + + router = CustomRouter() + server = HTTPServer(router) + +The main responsibility of `Router` implementation is to provide a +mapping from a request to `~.httputil.HTTPMessageDelegate` instance +that will handle this request. In the example above we can see that +routing is possible even without instantiating an `~.web.Application`. + +For routing to `~.web.RequestHandler` implementations we need an +`~.web.Application` instance. `~.web.Application.get_handler_delegate` +provides a convenient way to create `~.httputil.HTTPMessageDelegate` +for a given request and `~.web.RequestHandler`. + +Here is a simple example of how we can we route to +`~.web.RequestHandler` subclasses by HTTP method: + +.. code-block:: python + + resources = {} + + class GetResource(RequestHandler): + def get(self, path): + if path not in resources: + raise HTTPError(404) + + self.finish(resources[path]) + + class PostResource(RequestHandler): + def post(self, path): + resources[path] = self.request.body + + class HTTPMethodRouter(Router): + def __init__(self, app): + self.app = app + + def find_handler(self, request, **kwargs): + handler = GetResource if request.method == "GET" else PostResource + return self.app.get_handler_delegate(request, handler, path_args=[request.path]) + + router = HTTPMethodRouter(Application()) + server = HTTPServer(router) + +`ReversibleRouter` interface adds the ability to distinguish between +the routes and reverse them to the original urls using route's name +and additional arguments. `~.web.Application` is itself an +implementation of `ReversibleRouter` class. + +`RuleRouter` and `ReversibleRuleRouter` are implementations of +`Router` and `ReversibleRouter` interfaces and can be used for +creating rule-based routing configurations. + +Rules are instances of `Rule` class. They contain a `Matcher`, which +provides the logic for determining whether the rule is a match for a +particular request and a target, which can be one of the following. + +1) An instance of `~.httputil.HTTPServerConnectionDelegate`: + +.. code-block:: python + + router = RuleRouter([ + Rule(PathMatches("/handler"), ConnectionDelegate()), + # ... more rules + ]) + + class ConnectionDelegate(HTTPServerConnectionDelegate): + def start_request(self, server_conn, request_conn): + return MessageDelegate(request_conn) + +2) A callable accepting a single argument of `~.httputil.HTTPServerRequest` type: + +.. code-block:: python + + router = RuleRouter([ + Rule(PathMatches("/callable"), request_callable) + ]) + + def request_callable(request): + request.write(b"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\n\\r\\nOK") + request.finish() + +3) Another `Router` instance: + +.. code-block:: python + + router = RuleRouter([ + Rule(PathMatches("/router.*"), CustomRouter()) + ]) + +Of course a nested `RuleRouter` or a `~.web.Application` is allowed: + +.. 
code-block:: python
+
+    router = RuleRouter([
+        Rule(HostMatches("example.com"), RuleRouter([
+            Rule(PathMatches("/app1/.*"), Application([(r"/app1/handler", Handler)])),
+        ]))
+    ])
+
+    server = HTTPServer(router)
+
+In the example below `RuleRouter` is used to route between applications:
+
+.. code-block:: python
+
+    app1 = Application([
+        (r"/app1/handler", Handler1),
+        # other handlers ...
+    ])
+
+    app2 = Application([
+        (r"/app2/handler", Handler2),
+        # other handlers ...
+    ])
+
+    router = RuleRouter([
+        Rule(PathMatches("/app1.*"), app1),
+        Rule(PathMatches("/app2.*"), app2)
+    ])
+
+    server = HTTPServer(router)
+
+For more information on application-level routing see docs for `~.web.Application`.
+
+.. versionadded:: 4.5
+
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import re
+from functools import partial
+
+from tornado import httputil
+from tornado.httpserver import _CallableAdapter
+from tornado.escape import url_escape, url_unescape, utf8
+from tornado.log import app_log
+from tornado.util import basestring_type, import_object, re_unescape, unicode_type
+
+try:
+    import typing  # noqa
+except ImportError:
+    pass
+
+
+class Router(httputil.HTTPServerConnectionDelegate):
+    """Abstract router interface."""
+
+    def find_handler(self, request, **kwargs):
+        # type: (httputil.HTTPServerRequest, typing.Any)->httputil.HTTPMessageDelegate
+        """Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate`
+        that can serve the request.
+        Routing implementations may pass additional kwargs to extend the routing logic.
+
+        :arg httputil.HTTPServerRequest request: current HTTP request.
+        :arg kwargs: additional keyword arguments passed by routing implementation.
+        :returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to
+            process the request.
+        """
+        raise NotImplementedError()
+
+    def start_request(self, server_conn, request_conn):
+        return _RoutingDelegate(self, server_conn, request_conn)
+
+
+class ReversibleRouter(Router):
+    """Abstract router interface for routers that can handle named routes
+    and support reversing them to original urls.
+    """
+
+    def reverse_url(self, name, *args):
+        """Returns url string for a given route name and arguments
+        or ``None`` if no match is found.
+
+        :arg str name: route name.
+        :arg args: url parameters.
+        :returns: parametrized url string for a given route name (or ``None``).
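+
+        For example (a sketch; ``handle_user`` is a hypothetical request
+        callable)::
+
+            router = ReversibleRuleRouter([
+                Rule(PathMatches(r"/user/(\d+)"), handle_user, name="user"),
+            ])
+            router.reverse_url("user", 42)  # returns "/user/42"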
+ """ + raise NotImplementedError() + + +class _RoutingDelegate(httputil.HTTPMessageDelegate): + def __init__(self, router, server_conn, request_conn): + self.server_conn = server_conn + self.request_conn = request_conn + self.delegate = None + self.router = router # type: Router + + def headers_received(self, start_line, headers): + request = httputil.HTTPServerRequest( + connection=self.request_conn, + server_connection=self.server_conn, + start_line=start_line, headers=headers) + + self.delegate = self.router.find_handler(request) + if self.delegate is None: + app_log.debug("Delegate for %s %s request not found", + start_line.method, start_line.path) + self.delegate = _DefaultMessageDelegate(self.request_conn) + + return self.delegate.headers_received(start_line, headers) + + def data_received(self, chunk): + return self.delegate.data_received(chunk) + + def finish(self): + self.delegate.finish() + + def on_connection_close(self): + self.delegate.on_connection_close() + + +class _DefaultMessageDelegate(httputil.HTTPMessageDelegate): + def __init__(self, connection): + self.connection = connection + + def finish(self): + self.connection.write_headers( + httputil.ResponseStartLine("HTTP/1.1", 404, "Not Found"), httputil.HTTPHeaders()) + self.connection.finish() + + +class RuleRouter(Router): + """Rule-based router implementation.""" + + def __init__(self, rules=None): + """Constructs a router from an ordered list of rules:: + + RuleRouter([ + Rule(PathMatches("/handler"), Target), + # ... more rules + ]) + + You can also omit explicit `Rule` constructor and use tuples of arguments:: + + RuleRouter([ + (PathMatches("/handler"), Target), + ]) + + `PathMatches` is a default matcher, so the example above can be simplified:: + + RuleRouter([ + ("/handler", Target), + ]) + + In the examples above, ``Target`` can be a nested `Router` instance, an instance of + `~.httputil.HTTPServerConnectionDelegate` or an old-style callable, + accepting a request argument. + + :arg rules: a list of `Rule` instances or tuples of `Rule` + constructor arguments. + """ + self.rules = [] # type: typing.List[Rule] + if rules: + self.add_rules(rules) + + def add_rules(self, rules): + """Appends new rules to the router. + + :arg rules: a list of Rule instances (or tuples of arguments, which are + passed to Rule constructor). + """ + for rule in rules: + if isinstance(rule, (tuple, list)): + assert len(rule) in (2, 3, 4) + if isinstance(rule[0], basestring_type): + rule = Rule(PathMatches(rule[0]), *rule[1:]) + else: + rule = Rule(*rule) + + self.rules.append(self.process_rule(rule)) + + def process_rule(self, rule): + """Override this method for additional preprocessing of each rule. + + :arg Rule rule: a rule to be processed. + :returns: the same or modified Rule instance. + """ + return rule + + def find_handler(self, request, **kwargs): + for rule in self.rules: + target_params = rule.matcher.match(request) + if target_params is not None: + if rule.target_kwargs: + target_params['target_kwargs'] = rule.target_kwargs + + delegate = self.get_target_delegate( + rule.target, request, **target_params) + + if delegate is not None: + return delegate + + return None + + def get_target_delegate(self, target, request, **target_params): + """Returns an instance of `~.httputil.HTTPMessageDelegate` for a + Rule's target. This method is called by `~.find_handler` and can be + extended to provide additional target types. + + :arg target: a Rule's target. + :arg httputil.HTTPServerRequest request: current request. 
+ :arg target_params: additional parameters that can be useful + for `~.httputil.HTTPMessageDelegate` creation. + """ + if isinstance(target, Router): + return target.find_handler(request, **target_params) + + elif isinstance(target, httputil.HTTPServerConnectionDelegate): + return target.start_request(request.server_connection, request.connection) + + elif callable(target): + return _CallableAdapter( + partial(target, **target_params), request.connection + ) + + return None + + +class ReversibleRuleRouter(ReversibleRouter, RuleRouter): + """A rule-based router that implements ``reverse_url`` method. + + Each rule added to this router may have a ``name`` attribute that can be + used to reconstruct an original uri. The actual reconstruction takes place + in a rule's matcher (see `Matcher.reverse`). + """ + + def __init__(self, rules=None): + self.named_rules = {} # type: typing.Dict[str] + super(ReversibleRuleRouter, self).__init__(rules) + + def process_rule(self, rule): + rule = super(ReversibleRuleRouter, self).process_rule(rule) + + if rule.name: + if rule.name in self.named_rules: + app_log.warning( + "Multiple handlers named %s; replacing previous value", + rule.name) + self.named_rules[rule.name] = rule + + return rule + + def reverse_url(self, name, *args): + if name in self.named_rules: + return self.named_rules[name].matcher.reverse(*args) + + for rule in self.rules: + if isinstance(rule.target, ReversibleRouter): + reversed_url = rule.target.reverse_url(name, *args) + if reversed_url is not None: + return reversed_url + + return None + + +class Rule(object): + """A routing rule.""" + + def __init__(self, matcher, target, target_kwargs=None, name=None): + """Constructs a Rule instance. + + :arg Matcher matcher: a `Matcher` instance used for determining + whether the rule should be considered a match for a specific + request. + :arg target: a Rule's target (typically a ``RequestHandler`` or + `~.httputil.HTTPServerConnectionDelegate` subclass or even a nested `Router`, + depending on routing implementation). + :arg dict target_kwargs: a dict of parameters that can be useful + at the moment of target instantiation (for example, ``status_code`` + for a ``RequestHandler`` subclass). They end up in + ``target_params['target_kwargs']`` of `RuleRouter.get_target_delegate` + method. + :arg str name: the name of the rule that can be used to find it + in `ReversibleRouter.reverse_url` implementation. + """ + if isinstance(target, str): + # import the Module and instantiate the class + # Must be a fully qualified name (module.ClassName) + target = import_object(target) + + self.matcher = matcher # type: Matcher + self.target = target + self.target_kwargs = target_kwargs if target_kwargs else {} + self.name = name + + def reverse(self, *args): + return self.matcher.reverse(*args) + + def __repr__(self): + return '%s(%r, %s, kwargs=%r, name=%r)' % \ + (self.__class__.__name__, self.matcher, + self.target, self.target_kwargs, self.name) + + +class Matcher(object): + """Represents a matcher for request features.""" + + def match(self, request): + """Matches current instance against the request. + + :arg httputil.HTTPServerRequest request: current HTTP request + :returns: a dict of parameters to be passed to the target handler + (for example, ``handler_kwargs``, ``path_args``, ``path_kwargs`` + can be passed for proper `~.web.RequestHandler` instantiation). + An empty dict is a valid (and common) return value to indicate a match + when the argument-passing features are not used. 
+ ``None`` must be returned to indicate that there is no match.""" + raise NotImplementedError() + + def reverse(self, *args): + """Reconstructs full url from matcher instance and additional arguments.""" + return None + + +class AnyMatches(Matcher): + """Matches any request.""" + + def match(self, request): + return {} + + +class HostMatches(Matcher): + """Matches requests from hosts specified by ``host_pattern`` regex.""" + + def __init__(self, host_pattern): + if isinstance(host_pattern, basestring_type): + if not host_pattern.endswith("$"): + host_pattern += "$" + self.host_pattern = re.compile(host_pattern) + else: + self.host_pattern = host_pattern + + def match(self, request): + if self.host_pattern.match(request.host_name): + return {} + + return None + + +class DefaultHostMatches(Matcher): + """Matches requests from host that is equal to application's default_host. + Always returns no match if ``X-Real-Ip`` header is present. + """ + + def __init__(self, application, host_pattern): + self.application = application + self.host_pattern = host_pattern + + def match(self, request): + # Look for default host if not behind load balancer (for debugging) + if "X-Real-Ip" not in request.headers: + if self.host_pattern.match(self.application.default_host): + return {} + return None + + +class PathMatches(Matcher): + """Matches requests with paths specified by ``path_pattern`` regex.""" + + def __init__(self, path_pattern): + if isinstance(path_pattern, basestring_type): + if not path_pattern.endswith('$'): + path_pattern += '$' + self.regex = re.compile(path_pattern) + else: + self.regex = path_pattern + + assert len(self.regex.groupindex) in (0, self.regex.groups), \ + ("groups in url regexes must either be all named or all " + "positional: %r" % self.regex.pattern) + + self._path, self._group_count = self._find_groups() + + def match(self, request): + match = self.regex.match(request.path) + if match is None: + return None + if not self.regex.groups: + return {} + + path_args, path_kwargs = [], {} + + # Pass matched groups to the handler. Since + # match.groups() includes both named and + # unnamed groups, we want to use either groups + # or groupdict but not both. + if self.regex.groupindex: + path_kwargs = dict( + (str(k), _unquote_or_none(v)) + for (k, v) in match.groupdict().items()) + else: + path_args = [_unquote_or_none(s) for s in match.groups()] + + return dict(path_args=path_args, path_kwargs=path_kwargs) + + def reverse(self, *args): + if self._path is None: + raise ValueError("Cannot reverse url regex " + self.regex.pattern) + assert len(args) == self._group_count, "required number of arguments " \ + "not found" + if not len(args): + return self._path + converted_args = [] + for a in args: + if not isinstance(a, (unicode_type, bytes)): + a = str(a) + converted_args.append(url_escape(utf8(a), plus=False)) + return self._path % tuple(converted_args) + + def _find_groups(self): + """Returns a tuple (reverse string, group count) for a url. + + For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method + would return ('/%s/%s/', 2). + """ + pattern = self.regex.pattern + if pattern.startswith('^'): + pattern = pattern[1:] + if pattern.endswith('$'): + pattern = pattern[:-1] + + if self.regex.groups != pattern.count('('): + # The pattern is too complicated for our simplistic matching, + # so we can't support reversing it. 
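+            # (For contrast, a reversible pattern -- a sketch:
+            # PathMatches(r"/(\d{4})/([a-z-]+)/") yields ('/%s/%s/', 2) here,
+            # so reverse("2023", "docs") produces "/2023/docs/".)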
+ return None, None + + pieces = [] + for fragment in pattern.split('('): + if ')' in fragment: + paren_loc = fragment.index(')') + if paren_loc >= 0: + pieces.append('%s' + fragment[paren_loc + 1:]) + else: + try: + unescaped_fragment = re_unescape(fragment) + except ValueError: + # If we can't unescape part of it, we can't + # reverse this url. + return (None, None) + pieces.append(unescaped_fragment) + + return ''.join(pieces), self.regex.groups + + +class URLSpec(Rule): + """Specifies mappings between URLs and handlers. + + .. versionchanged: 4.5 + `URLSpec` is now a subclass of a `Rule` with `PathMatches` matcher and is preserved for + backwards compatibility. + """ + def __init__(self, pattern, handler, kwargs=None, name=None): + """Parameters: + + * ``pattern``: Regular expression to be matched. Any capturing + groups in the regex will be passed in to the handler's + get/post/etc methods as arguments (by keyword if named, by + position if unnamed. Named and unnamed capturing groups + may not be mixed in the same rule). + + * ``handler``: `~.web.RequestHandler` subclass to be invoked. + + * ``kwargs`` (optional): A dictionary of additional arguments + to be passed to the handler's constructor. + + * ``name`` (optional): A name for this handler. Used by + `~.web.Application.reverse_url`. + + """ + super(URLSpec, self).__init__(PathMatches(pattern), handler, kwargs, name) + + self.regex = self.matcher.regex + self.handler_class = self.target + self.kwargs = kwargs + + def __repr__(self): + return '%s(%r, %s, kwargs=%r, name=%r)' % \ + (self.__class__.__name__, self.regex.pattern, + self.handler_class, self.kwargs, self.name) + + +def _unquote_or_none(s): + """None-safe wrapper around url_unescape to handle unmatched optional + groups correctly. + + Note that args are passed as bytes so the handler can decide what + encoding to use. + """ + if s is None: + return s + return url_unescape(s, encoding=None, plus=False) diff --git a/server/www/packages/packages-windows/x86/tornado/simple_httpclient.py b/server/www/packages/packages-windows/x86/tornado/simple_httpclient.py new file mode 100644 index 0000000..60b7956 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/simple_httpclient.py @@ -0,0 +1,566 @@ +from __future__ import absolute_import, division, print_function + +from tornado.escape import _unicode +from tornado import gen +from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy +from tornado import httputil +from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters +from tornado.ioloop import IOLoop +from tornado.iostream import StreamClosedError +from tornado.netutil import Resolver, OverrideResolver, _client_ssl_defaults +from tornado.log import gen_log +from tornado import stack_context +from tornado.tcpclient import TCPClient +from tornado.util import PY3 + +import base64 +import collections +import copy +import functools +import re +import socket +import sys +import time +from io import BytesIO + + +if PY3: + import urllib.parse as urlparse +else: + import urlparse + +try: + import ssl +except ImportError: + # ssl is not available on Google App Engine. + ssl = None + + +class HTTPTimeoutError(HTTPError): + """Error raised by SimpleAsyncHTTPClient on timeout. + + For historical reasons, this is a subclass of `.HTTPClientError` + which simulates a response code of 599. + + .. 
versionadded:: 5.1 + """ + def __init__(self, message): + super(HTTPTimeoutError, self).__init__(599, message=message) + + def __str__(self): + return self.message + + +class HTTPStreamClosedError(HTTPError): + """Error raised by SimpleAsyncHTTPClient when the underlying stream is closed. + + When a more specific exception is available (such as `ConnectionResetError`), + it may be raised instead of this one. + + For historical reasons, this is a subclass of `.HTTPClientError` + which simulates a response code of 599. + + .. versionadded:: 5.1 + """ + def __init__(self, message): + super(HTTPStreamClosedError, self).__init__(599, message=message) + + def __str__(self): + return self.message + + +class SimpleAsyncHTTPClient(AsyncHTTPClient): + """Non-blocking HTTP client with no external dependencies. + + This class implements an HTTP 1.1 client on top of Tornado's IOStreams. + Some features found in the curl-based AsyncHTTPClient are not yet + supported. In particular, proxies are not supported, connections + are not reused, and callers cannot select the network interface to be + used. + """ + def initialize(self, max_clients=10, + hostname_mapping=None, max_buffer_size=104857600, + resolver=None, defaults=None, max_header_size=None, + max_body_size=None): + """Creates a AsyncHTTPClient. + + Only a single AsyncHTTPClient instance exists per IOLoop + in order to provide limitations on the number of pending connections. + ``force_instance=True`` may be used to suppress this behavior. + + Note that because of this implicit reuse, unless ``force_instance`` + is used, only the first call to the constructor actually uses + its arguments. It is recommended to use the ``configure`` method + instead of the constructor to ensure that arguments take effect. + + ``max_clients`` is the number of concurrent requests that can be + in progress; when this limit is reached additional requests will be + queued. Note that time spent waiting in this queue still counts + against the ``request_timeout``. + + ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses. + It can be used to make local DNS changes when modifying system-wide + settings like ``/etc/hosts`` is not possible or desirable (e.g. in + unittests). + + ``max_buffer_size`` (default 100MB) is the number of bytes + that can be read into memory at once. ``max_body_size`` + (defaults to ``max_buffer_size``) is the largest response body + that the client will accept. Without a + ``streaming_callback``, the smaller of these two limits + applies; with a ``streaming_callback`` only ``max_body_size`` + does. + + .. versionchanged:: 4.2 + Added the ``max_body_size`` argument. + """ + super(SimpleAsyncHTTPClient, self).initialize(defaults=defaults) + self.max_clients = max_clients + self.queue = collections.deque() + self.active = {} + self.waiting = {} + self.max_buffer_size = max_buffer_size + self.max_header_size = max_header_size + self.max_body_size = max_body_size + # TCPClient could create a Resolver for us, but we have to do it + # ourselves to support hostname_mapping. 
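+        # A typical configuration sketch (host mapping and values are
+        # illustrative):
+        #
+        #     AsyncHTTPClient.configure(
+        #         "tornado.simple_httpclient.SimpleAsyncHTTPClient",
+        #         max_clients=20,
+        #         hostname_mapping={"api.example.com": "127.0.0.1"})
+        #     client = AsyncHTTPClient()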
+ if resolver: + self.resolver = resolver + self.own_resolver = False + else: + self.resolver = Resolver() + self.own_resolver = True + if hostname_mapping is not None: + self.resolver = OverrideResolver(resolver=self.resolver, + mapping=hostname_mapping) + self.tcp_client = TCPClient(resolver=self.resolver) + + def close(self): + super(SimpleAsyncHTTPClient, self).close() + if self.own_resolver: + self.resolver.close() + self.tcp_client.close() + + def fetch_impl(self, request, callback): + key = object() + self.queue.append((key, request, callback)) + if not len(self.active) < self.max_clients: + timeout_handle = self.io_loop.add_timeout( + self.io_loop.time() + min(request.connect_timeout, + request.request_timeout), + functools.partial(self._on_timeout, key, "in request queue")) + else: + timeout_handle = None + self.waiting[key] = (request, callback, timeout_handle) + self._process_queue() + if self.queue: + gen_log.debug("max_clients limit reached, request queued. " + "%d active, %d queued requests." % ( + len(self.active), len(self.queue))) + + def _process_queue(self): + with stack_context.NullContext(): + while self.queue and len(self.active) < self.max_clients: + key, request, callback = self.queue.popleft() + if key not in self.waiting: + continue + self._remove_timeout(key) + self.active[key] = (request, callback) + release_callback = functools.partial(self._release_fetch, key) + self._handle_request(request, release_callback, callback) + + def _connection_class(self): + return _HTTPConnection + + def _handle_request(self, request, release_callback, final_callback): + self._connection_class()( + self, request, release_callback, + final_callback, self.max_buffer_size, self.tcp_client, + self.max_header_size, self.max_body_size) + + def _release_fetch(self, key): + del self.active[key] + self._process_queue() + + def _remove_timeout(self, key): + if key in self.waiting: + request, callback, timeout_handle = self.waiting[key] + if timeout_handle is not None: + self.io_loop.remove_timeout(timeout_handle) + del self.waiting[key] + + def _on_timeout(self, key, info=None): + """Timeout callback of request. + + Construct a timeout HTTPResponse when a timeout occurs. + + :arg object key: A simple object to mark the request. + :info string key: More detailed timeout information. 
+ """ + request, callback, timeout_handle = self.waiting[key] + self.queue.remove((key, request, callback)) + + error_message = "Timeout {0}".format(info) if info else "Timeout" + timeout_response = HTTPResponse( + request, 599, error=HTTPTimeoutError(error_message), + request_time=self.io_loop.time() - request.start_time) + self.io_loop.add_callback(callback, timeout_response) + del self.waiting[key] + + +class _HTTPConnection(httputil.HTTPMessageDelegate): + _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"]) + + def __init__(self, client, request, release_callback, + final_callback, max_buffer_size, tcp_client, + max_header_size, max_body_size): + self.io_loop = IOLoop.current() + self.start_time = self.io_loop.time() + self.start_wall_time = time.time() + self.client = client + self.request = request + self.release_callback = release_callback + self.final_callback = final_callback + self.max_buffer_size = max_buffer_size + self.tcp_client = tcp_client + self.max_header_size = max_header_size + self.max_body_size = max_body_size + self.code = None + self.headers = None + self.chunks = [] + self._decompressor = None + # Timeout handle returned by IOLoop.add_timeout + self._timeout = None + self._sockaddr = None + IOLoop.current().add_callback(self.run) + + @gen.coroutine + def run(self): + try: + self.parsed = urlparse.urlsplit(_unicode(self.request.url)) + if self.parsed.scheme not in ("http", "https"): + raise ValueError("Unsupported url scheme: %s" % + self.request.url) + # urlsplit results have hostname and port results, but they + # didn't support ipv6 literals until python 2.7. + netloc = self.parsed.netloc + if "@" in netloc: + userpass, _, netloc = netloc.rpartition("@") + host, port = httputil.split_host_and_port(netloc) + if port is None: + port = 443 if self.parsed.scheme == "https" else 80 + if re.match(r'^\[.*\]$', host): + # raw ipv6 addresses in urls are enclosed in brackets + host = host[1:-1] + self.parsed_hostname = host # save final host for _on_connect + + if self.request.allow_ipv6 is False: + af = socket.AF_INET + else: + af = socket.AF_UNSPEC + + ssl_options = self._get_ssl_options(self.parsed.scheme) + + timeout = min(self.request.connect_timeout, self.request.request_timeout) + if timeout: + self._timeout = self.io_loop.add_timeout( + self.start_time + timeout, + stack_context.wrap(functools.partial(self._on_timeout, "while connecting"))) + stream = yield self.tcp_client.connect( + host, port, af=af, + ssl_options=ssl_options, + max_buffer_size=self.max_buffer_size) + + if self.final_callback is None: + # final_callback is cleared if we've hit our timeout. 
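+                # Caller-side sketch of how these timeouts are configured
+                # (illustrative URL; values in seconds; HTTPRequest comes
+                # from tornado.httpclient):
+                #
+                #     req = HTTPRequest("http://example.com/",
+                #                       connect_timeout=5.0,
+                #                       request_timeout=10.0)
+                #     response = yield AsyncHTTPClient().fetch(req)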
+ stream.close() + return + self.stream = stream + self.stream.set_close_callback(self.on_connection_close) + self._remove_timeout() + if self.final_callback is None: + return + if self.request.request_timeout: + self._timeout = self.io_loop.add_timeout( + self.start_time + self.request.request_timeout, + stack_context.wrap(functools.partial(self._on_timeout, "during request"))) + if (self.request.method not in self._SUPPORTED_METHODS and + not self.request.allow_nonstandard_methods): + raise KeyError("unknown method %s" % self.request.method) + for key in ('network_interface', + 'proxy_host', 'proxy_port', + 'proxy_username', 'proxy_password', + 'proxy_auth_mode'): + if getattr(self.request, key, None): + raise NotImplementedError('%s not supported' % key) + if "Connection" not in self.request.headers: + self.request.headers["Connection"] = "close" + if "Host" not in self.request.headers: + if '@' in self.parsed.netloc: + self.request.headers["Host"] = self.parsed.netloc.rpartition('@')[-1] + else: + self.request.headers["Host"] = self.parsed.netloc + username, password = None, None + if self.parsed.username is not None: + username, password = self.parsed.username, self.parsed.password + elif self.request.auth_username is not None: + username = self.request.auth_username + password = self.request.auth_password or '' + if username is not None: + if self.request.auth_mode not in (None, "basic"): + raise ValueError("unsupported auth_mode %s", + self.request.auth_mode) + self.request.headers["Authorization"] = ( + b"Basic " + base64.b64encode( + httputil.encode_username_password(username, password))) + if self.request.user_agent: + self.request.headers["User-Agent"] = self.request.user_agent + if not self.request.allow_nonstandard_methods: + # Some HTTP methods nearly always have bodies while others + # almost never do. Fail in this case unless the user has + # opted out of sanity checks with allow_nonstandard_methods. + body_expected = self.request.method in ("POST", "PATCH", "PUT") + body_present = (self.request.body is not None or + self.request.body_producer is not None) + if ((body_expected and not body_present) or + (body_present and not body_expected)): + raise ValueError( + 'Body must %sbe None for method %s (unless ' + 'allow_nonstandard_methods is true)' % + ('not ' if body_expected else '', self.request.method)) + if self.request.expect_100_continue: + self.request.headers["Expect"] = "100-continue" + if self.request.body is not None: + # When body_producer is used the caller is responsible for + # setting Content-Length (or else chunked encoding will be used). + self.request.headers["Content-Length"] = str(len( + self.request.body)) + if (self.request.method == "POST" and + "Content-Type" not in self.request.headers): + self.request.headers["Content-Type"] = "application/x-www-form-urlencoded" + if self.request.decompress_response: + self.request.headers["Accept-Encoding"] = "gzip" + req_path = ((self.parsed.path or '/') + + (('?' 
+ self.parsed.query) if self.parsed.query else '')) + self.connection = self._create_connection(stream) + start_line = httputil.RequestStartLine(self.request.method, + req_path, '') + self.connection.write_headers(start_line, self.request.headers) + if self.request.expect_100_continue: + yield self.connection.read_response(self) + else: + yield self._write_body(True) + except Exception: + if not self._handle_exception(*sys.exc_info()): + raise + + def _get_ssl_options(self, scheme): + if scheme == "https": + if self.request.ssl_options is not None: + return self.request.ssl_options + # If we are using the defaults, don't construct a + # new SSLContext. + if (self.request.validate_cert and + self.request.ca_certs is None and + self.request.client_cert is None and + self.request.client_key is None): + return _client_ssl_defaults + ssl_ctx = ssl.create_default_context( + ssl.Purpose.SERVER_AUTH, + cafile=self.request.ca_certs) + if not self.request.validate_cert: + ssl_ctx.check_hostname = False + ssl_ctx.verify_mode = ssl.CERT_NONE + if self.request.client_cert is not None: + ssl_ctx.load_cert_chain(self.request.client_cert, + self.request.client_key) + if hasattr(ssl, 'OP_NO_COMPRESSION'): + # See netutil.ssl_options_to_context + ssl_ctx.options |= ssl.OP_NO_COMPRESSION + return ssl_ctx + return None + + def _on_timeout(self, info=None): + """Timeout callback of _HTTPConnection instance. + + Raise a `HTTPTimeoutError` when a timeout occurs. + + :info string key: More detailed timeout information. + """ + self._timeout = None + error_message = "Timeout {0}".format(info) if info else "Timeout" + if self.final_callback is not None: + self._handle_exception(HTTPTimeoutError, HTTPTimeoutError(error_message), + None) + + def _remove_timeout(self): + if self._timeout is not None: + self.io_loop.remove_timeout(self._timeout) + self._timeout = None + + def _create_connection(self, stream): + stream.set_nodelay(True) + connection = HTTP1Connection( + stream, True, + HTTP1ConnectionParameters( + no_keep_alive=True, + max_header_size=self.max_header_size, + max_body_size=self.max_body_size, + decompress=self.request.decompress_response), + self._sockaddr) + return connection + + @gen.coroutine + def _write_body(self, start_read): + if self.request.body is not None: + self.connection.write(self.request.body) + elif self.request.body_producer is not None: + fut = self.request.body_producer(self.connection.write) + if fut is not None: + yield fut + self.connection.finish() + if start_read: + try: + yield self.connection.read_response(self) + except StreamClosedError: + if not self._handle_exception(*sys.exc_info()): + raise + + def _release(self): + if self.release_callback is not None: + release_callback = self.release_callback + self.release_callback = None + release_callback() + + def _run_callback(self, response): + self._release() + if self.final_callback is not None: + final_callback = self.final_callback + self.final_callback = None + self.io_loop.add_callback(final_callback, response) + + def _handle_exception(self, typ, value, tb): + if self.final_callback: + self._remove_timeout() + if isinstance(value, StreamClosedError): + if value.real_error is None: + value = HTTPStreamClosedError("Stream closed") + else: + value = value.real_error + self._run_callback(HTTPResponse(self.request, 599, error=value, + request_time=self.io_loop.time() - self.start_time, + start_time=self.start_wall_time, + )) + + if hasattr(self, "stream"): + # TODO: this may cause a StreamClosedError to be raised + # by the 
connection's Future. Should we cancel the + # connection more gracefully? + self.stream.close() + return True + else: + # If our callback has already been called, we are probably + # catching an exception that is not caused by us but rather + # some child of our callback. Rather than drop it on the floor, + # pass it along, unless it's just the stream being closed. + return isinstance(value, StreamClosedError) + + def on_connection_close(self): + if self.final_callback is not None: + message = "Connection closed" + if self.stream.error: + raise self.stream.error + try: + raise HTTPStreamClosedError(message) + except HTTPStreamClosedError: + self._handle_exception(*sys.exc_info()) + + def headers_received(self, first_line, headers): + if self.request.expect_100_continue and first_line.code == 100: + self._write_body(False) + return + self.code = first_line.code + self.reason = first_line.reason + self.headers = headers + + if self._should_follow_redirect(): + return + + if self.request.header_callback is not None: + # Reassemble the start line. + self.request.header_callback('%s %s %s\r\n' % first_line) + for k, v in self.headers.get_all(): + self.request.header_callback("%s: %s\r\n" % (k, v)) + self.request.header_callback('\r\n') + + def _should_follow_redirect(self): + return (self.request.follow_redirects and + self.request.max_redirects > 0 and + self.code in (301, 302, 303, 307, 308)) + + def finish(self): + data = b''.join(self.chunks) + self._remove_timeout() + original_request = getattr(self.request, "original_request", + self.request) + if self._should_follow_redirect(): + assert isinstance(self.request, _RequestProxy) + new_request = copy.copy(self.request.request) + new_request.url = urlparse.urljoin(self.request.url, + self.headers["Location"]) + new_request.max_redirects = self.request.max_redirects - 1 + del new_request.headers["Host"] + # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4 + # Client SHOULD make a GET request after a 303. + # According to the spec, 302 should be followed by the same + # method as the original request, but in practice browsers + # treat 302 the same as 303, and many servers use 302 for + # compatibility with pre-HTTP/1.1 user agents which don't + # understand the 303 status. + if self.code in (302, 303): + new_request.method = "GET" + new_request.body = None + for h in ["Content-Length", "Content-Type", + "Content-Encoding", "Transfer-Encoding"]: + try: + del self.request.headers[h] + except KeyError: + pass + new_request.original_request = original_request + final_callback = self.final_callback + self.final_callback = None + self._release() + fut = self.client.fetch(new_request, raise_error=False) + fut.add_done_callback(lambda f: final_callback(f.result())) + self._on_end_request() + return + if self.request.streaming_callback: + buffer = BytesIO() + else: + buffer = BytesIO(data) # TODO: don't require one big string? + response = HTTPResponse(original_request, + self.code, reason=getattr(self, 'reason', None), + headers=self.headers, + request_time=self.io_loop.time() - self.start_time, + start_time=self.start_wall_time, + buffer=buffer, + effective_url=self.request.url) + self._run_callback(response) + self._on_end_request() + + def _on_end_request(self): + self.stream.close() + + def data_received(self, chunk): + if self._should_follow_redirect(): + # We're going to follow a redirect so just discard the body. 
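+            # Caller-side sketch of the streaming path handled below
+            # (``url`` is illustrative):
+            #
+            #     chunks = []
+            #     yield AsyncHTTPClient().fetch(
+            #         url, streaming_callback=chunks.append)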
+ return + if self.request.streaming_callback is not None: + self.request.streaming_callback(chunk) + else: + self.chunks.append(chunk) + + +if __name__ == "__main__": + AsyncHTTPClient.configure(SimpleAsyncHTTPClient) + main() diff --git a/server/www/packages/packages-windows/x86/tornado/speedups.cp37-win32.pyd b/server/www/packages/packages-windows/x86/tornado/speedups.cp37-win32.pyd new file mode 100644 index 0000000..494b821 Binary files /dev/null and b/server/www/packages/packages-windows/x86/tornado/speedups.cp37-win32.pyd differ diff --git a/server/www/packages/packages-windows/x86/tornado/stack_context.py b/server/www/packages/packages-windows/x86/tornado/stack_context.py new file mode 100644 index 0000000..a1eca4c --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/stack_context.py @@ -0,0 +1,413 @@ +# +# Copyright 2010 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""`StackContext` allows applications to maintain threadlocal-like state +that follows execution as it moves to other execution contexts. + +The motivating examples are to eliminate the need for explicit +``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to +allow some additional context to be kept for logging. + +This is slightly magic, but it's an extension of the idea that an +exception handler is a kind of stack-local state and when that stack +is suspended and resumed in a new context that state needs to be +preserved. `StackContext` shifts the burden of restoring that state +from each call site (e.g. wrapping each `.AsyncHTTPClient` callback +in ``async_callback``) to the mechanisms that transfer control from +one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`, +thread pools, etc). + +Example usage:: + + @contextlib.contextmanager + def die_on_error(): + try: + yield + except Exception: + logging.error("exception in asynchronous operation",exc_info=True) + sys.exit(1) + + with StackContext(die_on_error): + # Any exception thrown here *or in callback and its descendants* + # will cause the process to exit instead of spinning endlessly + # in the ioloop. + http_client.fetch(url, callback) + ioloop.start() + +Most applications shouldn't have to work with `StackContext` directly. +Here are a few rules of thumb for when it's necessary: + +* If you're writing an asynchronous library that doesn't rely on a + stack_context-aware library like `tornado.ioloop` or `tornado.iostream` + (for example, if you're writing a thread pool), use + `.stack_context.wrap()` before any asynchronous operations to capture the + stack context from where the operation was started. + +* If you're writing an asynchronous library that has some shared + resources (such as a connection pool), create those shared resources + within a ``with stack_context.NullContext():`` block. This will prevent + ``StackContexts`` from leaking from one request to another. 
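+
+  For example (a sketch; ``ConnectionPool`` is hypothetical)::
+
+      with stack_context.NullContext():
+          shared_pool = ConnectionPool()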
+ +* If you want to write something like an exception handler that will + persist across asynchronous calls, create a new `StackContext` (or + `ExceptionStackContext`), and make your asynchronous calls in a ``with`` + block that references your `StackContext`. + +.. deprecated:: 5.1 + + The ``stack_context`` package is deprecated and will be removed in + Tornado 6.0. +""" + +from __future__ import absolute_import, division, print_function + +import sys +import threading +import warnings + +from tornado.util import raise_exc_info + + +class StackContextInconsistentError(Exception): + pass + + +class _State(threading.local): + def __init__(self): + self.contexts = (tuple(), None) + + +_state = _State() + + +class StackContext(object): + """Establishes the given context as a StackContext that will be transferred. + + Note that the parameter is a callable that returns a context + manager, not the context itself. That is, where for a + non-transferable context manager you would say:: + + with my_context(): + + StackContext takes the function itself rather than its result:: + + with StackContext(my_context): + + The result of ``with StackContext() as cb:`` is a deactivation + callback. Run this callback when the StackContext is no longer + needed to ensure that it is not propagated any further (note that + deactivating a context does not affect any instances of that + context that are currently pending). This is an advanced feature + and not necessary in most applications. + """ + def __init__(self, context_factory): + warnings.warn("StackContext is deprecated and will be removed in Tornado 6.0", + DeprecationWarning) + self.context_factory = context_factory + self.contexts = [] + self.active = True + + def _deactivate(self): + self.active = False + + # StackContext protocol + def enter(self): + context = self.context_factory() + self.contexts.append(context) + context.__enter__() + + def exit(self, type, value, traceback): + context = self.contexts.pop() + context.__exit__(type, value, traceback) + + # Note that some of this code is duplicated in ExceptionStackContext + # below. ExceptionStackContext is more common and doesn't need + # the full generality of this class. + def __enter__(self): + self.old_contexts = _state.contexts + self.new_contexts = (self.old_contexts[0] + (self,), self) + _state.contexts = self.new_contexts + + try: + self.enter() + except: + _state.contexts = self.old_contexts + raise + + return self._deactivate + + def __exit__(self, type, value, traceback): + try: + self.exit(type, value, traceback) + finally: + final_contexts = _state.contexts + _state.contexts = self.old_contexts + + # Generator coroutines and with-statements with non-local + # effects interact badly. Check here for signs of + # the stack getting out of sync. + # Note that this check comes after restoring _state.context + # so that if it fails things are left in a (relatively) + # consistent state. + if final_contexts is not self.new_contexts: + raise StackContextInconsistentError( + 'stack_context inconsistency (may be caused by yield ' + 'within a "with StackContext" block)') + + # Break up a reference to itself to allow for faster GC on CPython. + self.new_contexts = None + + +class ExceptionStackContext(object): + """Specialization of StackContext for exception handling. + + The supplied ``exception_handler`` function will be called in the + event of an uncaught exception in this context. 
The semantics are + similar to a try/finally clause, and intended use cases are to log + an error, close a socket, or similar cleanup actions. The + ``exc_info`` triple ``(type, value, traceback)`` will be passed to the + exception_handler function. + + If the exception handler returns true, the exception will be + consumed and will not be propagated to other exception handlers. + + .. versionadded:: 5.1 + + The ``delay_warning`` argument can be used to delay the emission + of DeprecationWarnings until an exception is caught by the + ``ExceptionStackContext``, which facilitates certain transitional + use cases. + """ + def __init__(self, exception_handler, delay_warning=False): + self.delay_warning = delay_warning + if not self.delay_warning: + warnings.warn( + "StackContext is deprecated and will be removed in Tornado 6.0", + DeprecationWarning) + self.exception_handler = exception_handler + self.active = True + + def _deactivate(self): + self.active = False + + def exit(self, type, value, traceback): + if type is not None: + if self.delay_warning: + warnings.warn( + "StackContext is deprecated and will be removed in Tornado 6.0", + DeprecationWarning) + return self.exception_handler(type, value, traceback) + + def __enter__(self): + self.old_contexts = _state.contexts + self.new_contexts = (self.old_contexts[0], self) + _state.contexts = self.new_contexts + + return self._deactivate + + def __exit__(self, type, value, traceback): + try: + if type is not None: + return self.exception_handler(type, value, traceback) + finally: + final_contexts = _state.contexts + _state.contexts = self.old_contexts + + if final_contexts is not self.new_contexts: + raise StackContextInconsistentError( + 'stack_context inconsistency (may be caused by yield ' + 'within a "with StackContext" block)') + + # Break up a reference to itself to allow for faster GC on CPython. + self.new_contexts = None + + +class NullContext(object): + """Resets the `StackContext`. + + Useful when creating a shared resource on demand (e.g. an + `.AsyncHTTPClient`) where the stack that caused the creating is + not relevant to future operations. + """ + def __enter__(self): + self.old_contexts = _state.contexts + _state.contexts = (tuple(), None) + + def __exit__(self, type, value, traceback): + _state.contexts = self.old_contexts + + +def _remove_deactivated(contexts): + """Remove deactivated handlers from the chain""" + # Clean ctx handlers + stack_contexts = tuple([h for h in contexts[0] if h.active]) + + # Find new head + head = contexts[1] + while head is not None and not head.active: + head = head.old_contexts[1] + + # Process chain + ctx = head + while ctx is not None: + parent = ctx.old_contexts[1] + + while parent is not None: + if parent.active: + break + ctx.old_contexts = parent.old_contexts + parent = parent.old_contexts[1] + + ctx = parent + + return (stack_contexts, head) + + +def wrap(fn): + """Returns a callable object that will restore the current `StackContext` + when executed. + + Use this whenever saving a callback to be executed later in a + different execution context (either in a different thread or + asynchronously in the same thread). + """ + # Check if function is already wrapped + if fn is None or hasattr(fn, '_wrapped'): + return fn + + # Capture current stack head + # TODO: Any other better way to store contexts and update them in wrapped function? + cap_contexts = [_state.contexts] + + if not cap_contexts[0][0] and not cap_contexts[0][1]: + # Fast path when there are no active contexts. 
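+        # Typical use (a sketch): capture the caller's contexts before
+        # handing a callback to another thread or a later IOLoop iteration:
+        #
+        #     cb = stack_context.wrap(callback)
+        #     IOLoop.current().add_callback(cb)  # runs under the captured contexts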
+ def null_wrapper(*args, **kwargs): + try: + current_state = _state.contexts + _state.contexts = cap_contexts[0] + return fn(*args, **kwargs) + finally: + _state.contexts = current_state + null_wrapper._wrapped = True + return null_wrapper + + def wrapped(*args, **kwargs): + ret = None + try: + # Capture old state + current_state = _state.contexts + + # Remove deactivated items + cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0]) + + # Force new state + _state.contexts = contexts + + # Current exception + exc = (None, None, None) + top = None + + # Apply stack contexts + last_ctx = 0 + stack = contexts[0] + + # Apply state + for n in stack: + try: + n.enter() + last_ctx += 1 + except: + # Exception happened. Record exception info and store top-most handler + exc = sys.exc_info() + top = n.old_contexts[1] + + # Execute callback if no exception happened while restoring state + if top is None: + try: + ret = fn(*args, **kwargs) + except: + exc = sys.exc_info() + top = contexts[1] + + # If there was exception, try to handle it by going through the exception chain + if top is not None: + exc = _handle_exception(top, exc) + else: + # Otherwise take shorter path and run stack contexts in reverse order + while last_ctx > 0: + last_ctx -= 1 + c = stack[last_ctx] + + try: + c.exit(*exc) + except: + exc = sys.exc_info() + top = c.old_contexts[1] + break + else: + top = None + + # If if exception happened while unrolling, take longer exception handler path + if top is not None: + exc = _handle_exception(top, exc) + + # If exception was not handled, raise it + if exc != (None, None, None): + raise_exc_info(exc) + finally: + _state.contexts = current_state + return ret + + wrapped._wrapped = True + return wrapped + + +def _handle_exception(tail, exc): + while tail is not None: + try: + if tail.exit(*exc): + exc = (None, None, None) + except: + exc = sys.exc_info() + + tail = tail.old_contexts[1] + + return exc + + +def run_with_stack_context(context, func): + """Run a coroutine ``func`` in the given `StackContext`. + + It is not safe to have a ``yield`` statement within a ``with StackContext`` + block, so it is difficult to use stack context with `.gen.coroutine`. + This helper function runs the function in the correct context while + keeping the ``yield`` and ``with`` statements syntactically separate. + + Example:: + + @gen.coroutine + def incorrect(): + with StackContext(ctx): + # ERROR: this will raise StackContextInconsistentError + yield other_coroutine() + + @gen.coroutine + def correct(): + yield run_with_stack_context(StackContext(ctx), other_coroutine) + + .. versionadded:: 3.1 + """ + with context: + return func() diff --git a/server/www/packages/packages-windows/x86/tornado/tcpclient.py b/server/www/packages/packages-windows/x86/tornado/tcpclient.py new file mode 100644 index 0000000..3a1b58c --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/tcpclient.py @@ -0,0 +1,276 @@ +# +# Copyright 2014 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""A non-blocking TCP connection factory. +""" +from __future__ import absolute_import, division, print_function + +import functools +import socket +import numbers +import datetime + +from tornado.concurrent import Future, future_add_done_callback +from tornado.ioloop import IOLoop +from tornado.iostream import IOStream +from tornado import gen +from tornado.netutil import Resolver +from tornado.platform.auto import set_close_exec +from tornado.gen import TimeoutError +from tornado.util import timedelta_to_seconds + +_INITIAL_CONNECT_TIMEOUT = 0.3 + + +class _Connector(object): + """A stateless implementation of the "Happy Eyeballs" algorithm. + + "Happy Eyeballs" is documented in RFC6555 as the recommended practice + for when both IPv4 and IPv6 addresses are available. + + In this implementation, we partition the addresses by family, and + make the first connection attempt to whichever address was + returned first by ``getaddrinfo``. If that connection fails or + times out, we begin a connection in parallel to the first address + of the other family. If there are additional failures we retry + with other addresses, keeping one connection attempt per family + in flight at a time. + + http://tools.ietf.org/html/rfc6555 + + """ + def __init__(self, addrinfo, connect): + self.io_loop = IOLoop.current() + self.connect = connect + + self.future = Future() + self.timeout = None + self.connect_timeout = None + self.last_error = None + self.remaining = len(addrinfo) + self.primary_addrs, self.secondary_addrs = self.split(addrinfo) + self.streams = set() + + @staticmethod + def split(addrinfo): + """Partition the ``addrinfo`` list by address family. + + Returns two lists. The first list contains the first entry from + ``addrinfo`` and all others with the same family, and the + second list contains all other addresses (normally one list will + be AF_INET and the other AF_INET6, although non-standard resolvers + may return additional families). + """ + primary = [] + secondary = [] + primary_af = addrinfo[0][0] + for af, addr in addrinfo: + if af == primary_af: + primary.append((af, addr)) + else: + secondary.append((af, addr)) + return primary, secondary + + def start(self, timeout=_INITIAL_CONNECT_TIMEOUT, connect_timeout=None): + self.try_connect(iter(self.primary_addrs)) + self.set_timeout(timeout) + if connect_timeout is not None: + self.set_connect_timeout(connect_timeout) + return self.future + + def try_connect(self, addrs): + try: + af, addr = next(addrs) + except StopIteration: + # We've reached the end of our queue, but the other queue + # might still be working. Send a final error on the future + # only when both queues are finished. + if self.remaining == 0 and not self.future.done(): + self.future.set_exception(self.last_error or + IOError("connection failed")) + return + stream, future = self.connect(af, addr) + self.streams.add(stream) + future_add_done_callback( + future, functools.partial(self.on_connect_done, addrs, af, addr)) + + def on_connect_done(self, addrs, af, addr, future): + self.remaining -= 1 + try: + stream = future.result() + except Exception as e: + if self.future.done(): + return + # Error: try again (but remember what happened so we have an + # error to raise in the end) + self.last_error = e + self.try_connect(addrs) + if self.timeout is not None: + # If the first attempt failed, don't wait for the + # timeout to try an address from the secondary queue. 
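+            # Caller-side sketch of the code that drives this connector
+            # (hypothetical host/port; ``connect`` is ``TCPClient.connect``
+            # defined below):
+            #
+            #     stream = yield TCPClient().connect("example.com", 80,
+            #                                        timeout=10)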
+ self.io_loop.remove_timeout(self.timeout) + self.on_timeout() + return + self.clear_timeouts() + if self.future.done(): + # This is a late arrival; just drop it. + stream.close() + else: + self.streams.discard(stream) + self.future.set_result((af, addr, stream)) + self.close_streams() + + def set_timeout(self, timeout): + self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, + self.on_timeout) + + def on_timeout(self): + self.timeout = None + if not self.future.done(): + self.try_connect(iter(self.secondary_addrs)) + + def clear_timeout(self): + if self.timeout is not None: + self.io_loop.remove_timeout(self.timeout) + + def set_connect_timeout(self, connect_timeout): + self.connect_timeout = self.io_loop.add_timeout( + connect_timeout, self.on_connect_timeout) + + def on_connect_timeout(self): + if not self.future.done(): + self.future.set_exception(TimeoutError()) + self.close_streams() + + def clear_timeouts(self): + if self.timeout is not None: + self.io_loop.remove_timeout(self.timeout) + if self.connect_timeout is not None: + self.io_loop.remove_timeout(self.connect_timeout) + + def close_streams(self): + for stream in self.streams: + stream.close() + + +class TCPClient(object): + """A non-blocking TCP connection factory. + + .. versionchanged:: 5.0 + The ``io_loop`` argument (deprecated since version 4.1) has been removed. + """ + def __init__(self, resolver=None): + if resolver is not None: + self.resolver = resolver + self._own_resolver = False + else: + self.resolver = Resolver() + self._own_resolver = True + + def close(self): + if self._own_resolver: + self.resolver.close() + + @gen.coroutine + def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None, + max_buffer_size=None, source_ip=None, source_port=None, + timeout=None): + """Connect to the given host and port. + + Asynchronously returns an `.IOStream` (or `.SSLIOStream` if + ``ssl_options`` is not None). + + Using the ``source_ip`` kwarg, one can specify the source + IP address to use when establishing the connection. + In case the user needs to resolve and + use a specific interface, it has to be handled outside + of Tornado as this depends very much on the platform. + + Raises `TimeoutError` if the input future does not complete before + ``timeout``, which may be specified in any form allowed by + `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time + relative to `.IOLoop.time`) + + Similarly, when the user requires a certain source port, it can + be specified using the ``source_port`` arg. + + .. versionchanged:: 4.5 + Added the ``source_ip`` and ``source_port`` arguments. + + .. versionchanged:: 5.0 + Added the ``timeout`` argument. + """ + if timeout is not None: + if isinstance(timeout, numbers.Real): + timeout = IOLoop.current().time() + timeout + elif isinstance(timeout, datetime.timedelta): + timeout = IOLoop.current().time() + timedelta_to_seconds(timeout) + else: + raise TypeError("Unsupported timeout %r" % timeout) + if timeout is not None: + addrinfo = yield gen.with_timeout( + timeout, self.resolver.resolve(host, port, af)) + else: + addrinfo = yield self.resolver.resolve(host, port, af) + connector = _Connector( + addrinfo, + functools.partial(self._create_stream, max_buffer_size, + source_ip=source_ip, source_port=source_port) + ) + af, addr, stream = yield connector.start(connect_timeout=timeout) + # TODO: For better performance we could cache the (af, addr) + # information here and re-use it on subsequent connections to + # the same host. 
(http://tools.ietf.org/html/rfc6555#section-4.2) + if ssl_options is not None: + if timeout is not None: + stream = yield gen.with_timeout(timeout, stream.start_tls( + False, ssl_options=ssl_options, server_hostname=host)) + else: + stream = yield stream.start_tls(False, ssl_options=ssl_options, + server_hostname=host) + raise gen.Return(stream) + + def _create_stream(self, max_buffer_size, af, addr, source_ip=None, + source_port=None): + # Always connect in plaintext; we'll convert to ssl if necessary + # after one connection has completed. + source_port_bind = source_port if isinstance(source_port, int) else 0 + source_ip_bind = source_ip + if source_port_bind and not source_ip: + # User required a specific port, but did not specify + # a certain source IP, will bind to the default loopback. + source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1' + # Trying to use the same address family as the requested af socket: + # - 127.0.0.1 for IPv4 + # - ::1 for IPv6 + socket_obj = socket.socket(af) + set_close_exec(socket_obj.fileno()) + if source_port_bind or source_ip_bind: + # If the user requires binding also to a specific IP/port. + try: + socket_obj.bind((source_ip_bind, source_port_bind)) + except socket.error: + socket_obj.close() + # Fail loudly if unable to use the IP/port. + raise + try: + stream = IOStream(socket_obj, + max_buffer_size=max_buffer_size) + except socket.error as e: + fu = Future() + fu.set_exception(e) + return fu + else: + return stream, stream.connect(addr) diff --git a/server/www/packages/packages-windows/x86/tornado/tcpserver.py b/server/www/packages/packages-windows/x86/tornado/tcpserver.py new file mode 100644 index 0000000..4f5d6f0 --- /dev/null +++ b/server/www/packages/packages-windows/x86/tornado/tcpserver.py @@ -0,0 +1,299 @@ +# +# Copyright 2011 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""A non-blocking, single-threaded TCP server.""" +from __future__ import absolute_import, division, print_function + +import errno +import os +import socket + +from tornado import gen +from tornado.log import app_log +from tornado.ioloop import IOLoop +from tornado.iostream import IOStream, SSLIOStream +from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket +from tornado import process +from tornado.util import errno_from_exception + +try: + import ssl +except ImportError: + # ssl is not available on Google App Engine. + ssl = None + + +class TCPServer(object): + r"""A non-blocking, single-threaded TCP server. + + To use `TCPServer`, define a subclass which overrides the `handle_stream` + method. 
For example, a simple echo server could be defined like this:: + + from tornado.tcpserver import TCPServer + from tornado.iostream import StreamClosedError + from tornado import gen + + class EchoServer(TCPServer): + async def handle_stream(self, stream, address): + while True: + try: + data = await stream.read_until(b"\n") + await stream.write(data) + except StreamClosedError: + break + + To make this server serve SSL traffic, send the ``ssl_options`` keyword + argument with an `ssl.SSLContext` object. For compatibility with older + versions of Python ``ssl_options`` may also be a dictionary of keyword + arguments for the `ssl.wrap_socket` method.:: + + ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) + ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"), + os.path.join(data_dir, "mydomain.key")) + TCPServer(ssl_options=ssl_ctx) + + `TCPServer` initialization follows one of three patterns: + + 1. `listen`: simple single-process:: + + server = TCPServer() + server.listen(8888) + IOLoop.current().start() + + 2. `bind`/`start`: simple multi-process:: + + server = TCPServer() + server.bind(8888) + server.start(0) # Forks multiple sub-processes + IOLoop.current().start() + + When using this interface, an `.IOLoop` must *not* be passed + to the `TCPServer` constructor. `start` will always start + the server on the default singleton `.IOLoop`. + + 3. `add_sockets`: advanced multi-process:: + + sockets = bind_sockets(8888) + tornado.process.fork_processes(0) + server = TCPServer() + server.add_sockets(sockets) + IOLoop.current().start() + + The `add_sockets` interface is more complicated, but it can be + used with `tornado.process.fork_processes` to give you more + flexibility in when the fork happens. `add_sockets` can + also be used in single-process servers if you want to create + your listening sockets in some way other than + `~tornado.netutil.bind_sockets`. + + .. versionadded:: 3.1 + The ``max_buffer_size`` argument. + + .. versionchanged:: 5.0 + The ``io_loop`` argument has been removed. + """ + def __init__(self, ssl_options=None, max_buffer_size=None, + read_chunk_size=None): + self.ssl_options = ssl_options + self._sockets = {} # fd -> socket object + self._handlers = {} # fd -> remove_handler callable + self._pending_sockets = [] + self._started = False + self._stopped = False + self.max_buffer_size = max_buffer_size + self.read_chunk_size = read_chunk_size + + # Verify the SSL options. Otherwise we don't get errors until clients + # connect. This doesn't verify that the keys are legitimate, but + # the SSL module doesn't do that until there is a connected socket + # which seems like too much work + if self.ssl_options is not None and isinstance(self.ssl_options, dict): + # Only certfile is required: it can contain both keys + if 'certfile' not in self.ssl_options: + raise KeyError('missing key "certfile" in ssl_options') + + if not os.path.exists(self.ssl_options['certfile']): + raise ValueError('certfile "%s" does not exist' % + self.ssl_options['certfile']) + if ('keyfile' in self.ssl_options and + not os.path.exists(self.ssl_options['keyfile'])): + raise ValueError('keyfile "%s" does not exist' % + self.ssl_options['keyfile']) + + def listen(self, port, address=""): + """Starts accepting connections on the given port. + + This method may be called more than once to listen on multiple ports. + `listen` takes effect immediately; it is not necessary to call + `TCPServer.start` afterwards. It is, however, necessary to start + the `.IOLoop`. 
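+
+        A minimal sketch (``EchoServer`` is the example subclass from the
+        class docstring above)::
+
+            server = EchoServer()
+            server.listen(8888)
+            IOLoop.current().start()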
+        """
+        sockets = bind_sockets(port, address=address)
+        self.add_sockets(sockets)
+
+    def add_sockets(self, sockets):
+        """Makes this server start accepting connections on the given sockets.
+
+        The ``sockets`` parameter is a list of socket objects such as
+        those returned by `~tornado.netutil.bind_sockets`.
+        `add_sockets` is typically used in combination with that
+        method and `tornado.process.fork_processes` to provide greater
+        control over the initialization of a multi-process server.
+        """
+        for sock in sockets:
+            self._sockets[sock.fileno()] = sock
+            self._handlers[sock.fileno()] = add_accept_handler(
+                sock, self._handle_connection)
+
+    def add_socket(self, socket):
+        """Singular version of `add_sockets`. Takes a single socket object."""
+        self.add_sockets([socket])
+
+    def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=128,
+             reuse_port=False):
+        """Binds this server to the given port on the given address.
+
+        To start the server, call `start`. If you want to run this server
+        in a single process, you can call `listen` as a shortcut to the
+        sequence of `bind` and `start` calls.
+
+        Address may be either an IP address or hostname. If it's a hostname,
+        the server will listen on all IP addresses associated with the
+        name. Address may be an empty string or None to listen on all
+        available interfaces. Family may be set to either `socket.AF_INET`
+        or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
+        both will be used if available.
+
+        The ``backlog`` argument has the same meaning as for
+        `socket.listen <socket.socket.listen>`. The ``reuse_port`` argument
+        has the same meaning as for `.bind_sockets`.
+
+        This method may be called multiple times prior to `start` to listen
+        on multiple ports or interfaces.
+
+        .. versionchanged:: 4.4
+           Added the ``reuse_port`` argument.
+        """
+        sockets = bind_sockets(port, address=address, family=family,
+                               backlog=backlog, reuse_port=reuse_port)
+        if self._started:
+            self.add_sockets(sockets)
+        else:
+            self._pending_sockets.extend(sockets)
+
+    def start(self, num_processes=1):
+        """Starts this server in the `.IOLoop`.
+
+        By default, we run the server in this process and do not fork any
+        additional child process.
+
+        If num_processes is ``None`` or <= 0, we detect the number of cores
+        available on this machine and fork that number of child
+        processes. If num_processes is given and > 1, we fork that
+        specific number of sub-processes.
+
+        Since we use processes and not threads, there is no shared memory
+        between any server code.
+
+        Note that multiple processes are not compatible with the autoreload
+        module (or the ``autoreload=True`` option to `tornado.web.Application`
+        which defaults to True when ``debug=True``).
+        When using multiple processes, no IOLoops can be created or
+        referenced until after the call to ``TCPServer.start(n)``.
+        """
+        assert not self._started
+        self._started = True
+        if num_processes != 1:
+            process.fork_processes(num_processes)
+        sockets = self._pending_sockets
+        self._pending_sockets = []
+        self.add_sockets(sockets)
+
+    def stop(self):
+        """Stops listening for new connections.
+
+        Requests currently in progress may still continue after the
+        server is stopped.
+        """
+        if self._stopped:
+            return
+        self._stopped = True
+        for fd, sock in self._sockets.items():
+            assert sock.fileno() == fd
+            # Unregister socket from IOLoop
+            self._handlers.pop(fd)()
+            sock.close()
+
+    def handle_stream(self, stream, address):
+        """Override to handle a new `.IOStream` from an incoming connection.
+
+        This method may be a coroutine; if so any exceptions it raises
+        asynchronously will be logged. Accepting of incoming connections
+        will not be blocked by this coroutine.
+
+        If this `TCPServer` is configured for SSL, ``handle_stream``
+        may be called before the SSL handshake has completed. Use
+        `.SSLIOStream.wait_for_handshake` if you need to verify the client's
+        certificate or use NPN/ALPN.
+
+        .. versionchanged:: 4.2
+           Added the option for this method to be a coroutine.
+        """
+        raise NotImplementedError()
+
+    def _handle_connection(self, connection, address):
+        if self.ssl_options is not None:
+            assert ssl, "Python 2.6+ and OpenSSL required for SSL"
+            try:
+                connection = ssl_wrap_socket(connection,
+                                             self.ssl_options,
+                                             server_side=True,
+                                             do_handshake_on_connect=False)
+            except ssl.SSLError as err:
+                if err.args[0] == ssl.SSL_ERROR_EOF:
+                    return connection.close()
+                else:
+                    raise
+            except socket.error as err:
+                # If the connection is closed immediately after it is created
+                # (as in a port scan), we can get one of several errors.
+                # wrap_socket makes an internal call to getpeername,
+                # which may return either EINVAL (Mac OS X) or ENOTCONN
+                # (Linux). If it returns ENOTCONN, this error is
+                # silently swallowed by the ssl module, so we need to
+                # catch another error later on (AttributeError in
+                # SSLIOStream._do_ssl_handshake).
+                # To test this behavior, try nmap with the -sT flag.
+                # https://github.com/tornadoweb/tornado/pull/750
+                if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL):
+                    return connection.close()
+                else:
+                    raise
+        try:
+            if self.ssl_options is not None:
+                stream = SSLIOStream(connection,
+                                     max_buffer_size=self.max_buffer_size,
+                                     read_chunk_size=self.read_chunk_size)
+            else:
+                stream = IOStream(connection,
+                                  max_buffer_size=self.max_buffer_size,
+                                  read_chunk_size=self.read_chunk_size)
+
+            future = self.handle_stream(stream, address)
+            if future is not None:
+                IOLoop.current().add_future(gen.convert_yielded(future),
+                                            lambda f: f.result())
+        except Exception:
+            app_log.error("Error in connection callback", exc_info=True)
diff --git a/server/www/packages/packages-windows/x86/tornado/template.py b/server/www/packages/packages-windows/x86/tornado/template.py
new file mode 100644
index 0000000..61b9874
--- /dev/null
+++ b/server/www/packages/packages-windows/x86/tornado/template.py
@@ -0,0 +1,976 @@
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A simple template system that compiles templates to Python code.
+
+Basic usage looks like::
+
+    t = template.Template("{{ myvalue }}")
+    print(t.generate(myvalue="XXX"))
+
+`Loader` is a class that loads templates from a root directory and caches
+the compiled templates::
+
+    loader = template.Loader("/home/btaylor")
+    print(loader.load("test.html").generate(myvalue="XXX"))
+
+We compile all templates to raw Python. Error-reporting is currently... uh,
+interesting. Syntax for the templates::
+
+    ### base.html
+    <html>
+      <head>
+        <title>{% block title %}Default title{% end %}</title>
+      </head>
+      <body>
+        <ul>
+          {% for student in students %}
+            {% block student %}
+              <li>{{ escape(student.name) }}</li>
+            {% end %}
+          {% end %}
+        </ul>
+      </body>
+    </html>
+
+    ### bold.html
+    {% extends "base.html" %}
+
+    {% block title %}A bolder title{% end %}
+
+    {% block student %}
+      <li><span style="bold">{{ escape(student.name) }}</span></li>
+    {% end %}
+
+Unlike most other template systems, we do not put any restrictions on the
+expressions you can include in your statements. ``if`` and ``for`` blocks get
+translated exactly into Python, so you can do complex expressions like::
+
+   {% for student in [p for p in people if p.student and p.age > 23] %}
+     <li>{{ escape(student.name) }}</li>
+   {% end %}
+
+Translating directly to Python means you can apply functions to expressions
+easily, like the ``escape()`` function in the examples above. You can pass
+functions in to your template just like any other variable
+(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::
+
+    ### Python code
+    def add(x, y):
+        return x + y
+    template.execute(add=add)
+
+    ### The template
+    {{ add(1, 2) }}
+
+We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
+`.json_encode()`, and `.squeeze()` to all templates by default.
+
+Typical applications do not create `Template` or `Loader` instances by
+hand, but instead use the `~.RequestHandler.render` and
+`~.RequestHandler.render_string` methods of
+`tornado.web.RequestHandler`, which load templates automatically based
+on the ``template_path`` `.Application` setting.
+
+Variable names beginning with ``_tt_`` are reserved by the template
+system and should not be used by application code.
+
+Syntax Reference
+----------------
+
+Template expressions are surrounded by double curly braces: ``{{ ... }}``.
+The contents may be any python expression, which will be escaped according
+to the current autoescape setting and inserted into the output. Other
+template directives use ``{% %}``.
+
+To comment out a section so that it is omitted from the output, surround it
+with ``{# ... #}``.
+
+These tags may be escaped as ``{{!``, ``{%!``, and ``{#!``
+if you need to include a literal ``{{``, ``{%``, or ``{#`` in the output.
+
+
+``{% apply *function* %}...{% end %}``
+    Applies a function to the output of all template code between ``apply``
+    and ``end``::
+
+        {% apply linkify %}{{name}} said: {{message}}{% end %}
+
+    Note that as an implementation detail apply blocks are implemented
+    as nested functions and thus may interact strangely with variables
+    set via ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}``
+    within loops.
+
+``{% autoescape *function* %}``
+    Sets the autoescape mode for the current file. This does not affect
+    other files, even those referenced by ``{% include %}``. Note that
+    autoescaping can also be configured globally, at the `.Application`
+    or `Loader`.::
+
+        {% autoescape xhtml_escape %}
+        {% autoescape None %}
+
+``{% block *name* %}...{% end %}``
+    Indicates a named, replaceable block for use with ``{% extends %}``.
+    Blocks in the parent template will be replaced with the contents of
+    the same-named block in a child template.::
+
+        <!-- base.html -->
+        <title>{% block title %}Default title{% end %}</title>
+
+        <!-- mypage.html -->
+        {% extends "base.html" %}
+        {% block title %}My page title{% end %}
+
+``{% comment ... %}``
+    A comment which will be removed from the template output. Note that
+    there is no ``{% end %}`` tag; the comment goes from the word ``comment``
+    to the closing ``%}`` tag.
+
+``{% extends *filename* %}``
+    Inherit from another template. Templates that use ``extends`` should
+    contain one or more ``block`` tags to replace content from the parent
+    template. Anything in the child template not contained in a ``block``
+    tag will be ignored. For an example, see the ``{% block %}`` tag.
+
+``{% for *var* in *expr* %}...{% end %}``
+    Same as the python ``for`` statement. ``{% break %}`` and
+    ``{% continue %}`` may be used inside the loop.
+
+``{% from *x* import *y* %}``
+    Same as the python ``import`` statement.
+
+``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}``
+    Conditional statement - outputs the first section whose condition is
+    true. (The ``elif`` and ``else`` sections are optional)
+
+``{% import *module* %}``
+    Same as the python ``import`` statement.
+
+``{% include *filename* %}``
+    Includes another template file. The included file can see all the local
+    variables as if it were copied directly to the point of the ``include``
+    directive (the ``{% autoescape %}`` directive is an exception).
+    Alternately, ``{% module Template(filename, **kwargs) %}`` may be used
+    to include another template with an isolated namespace.
+
+``{% module *expr* %}``
+    Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is
+    not escaped::
+
+        {% module Template("foo.html", arg=42) %}
+
+    ``UIModules`` are a feature of the `tornado.web.RequestHandler`
+    class (and specifically its ``render`` method) and will not work
+    when the template system is used on its own in other contexts.
+
+``{% raw *expr* %}``
+    Outputs the result of the given expression without autoescaping.
+
+``{% set *x* = *y* %}``
+    Sets a local variable.
+
+``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}``
+    Same as the python ``try`` statement.
+
+``{% while *condition* %}... {% end %}``
+    Same as the python ``while`` statement. ``{% break %}`` and
+    ``{% continue %}`` may be used inside the loop.
+
+``{% whitespace *mode* %}``
+    Sets the whitespace mode for the remainder of the current file
+    (or until the next ``{% whitespace %}`` directive). See
+    `filter_whitespace` for available options. New in Tornado 4.3.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import datetime
+import linecache
+import os.path
+import posixpath
+import re
+import threading
+
+from tornado import escape
+from tornado.log import app_log
+from tornado.util import ObjectDict, exec_in, unicode_type, PY3
+
+if PY3:
+    from io import StringIO
+else:
+    from cStringIO import StringIO
+
+_DEFAULT_AUTOESCAPE = "xhtml_escape"
+_UNSET = object()
+
+
+def filter_whitespace(mode, text):
+    """Transform whitespace in ``text`` according to ``mode``.
+
+    Available modes are:
+
+    * ``all``: Return all whitespace unmodified.
+    * ``single``: Collapse consecutive whitespace with a single whitespace
+      character, preserving newlines.
+    * ``oneline``: Collapse all runs of whitespace into a single space
+      character, removing all newlines in the process.
+
+    .. versionadded:: 4.3
+    """
+    if mode == 'all':
+        return text
+    elif mode == 'single':
+        text = re.sub(r"([\t ]+)", " ", text)
+        text = re.sub(r"(\s*\n\s*)", "\n", text)
+        return text
+    elif mode == 'oneline':
+        return re.sub(r"(\s+)", " ", text)
+    else:
+        raise Exception("invalid whitespace mode %s" % mode)
+
+
+class Template(object):
+    """A compiled template.
+
+    We compile into Python from the given template_string. You can generate
+    the template from variables with generate().
+    """
+    # note that the constructor's signature is not extracted with
+    # autodoc because _UNSET looks like garbage. When changing
+    # this signature update website/sphinx/template.rst too.
+    def __init__(self, template_string, name="<string>", loader=None,
+                 compress_whitespace=_UNSET, autoescape=_UNSET,
+                 whitespace=None):
+        """Construct a Template.
+
+        :arg str template_string: the contents of the template file.
+        :arg str name: the filename from which the template was loaded
+            (used for error message).
+        :arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible
+            for this template, used to resolve ``{% include %}`` and ``{% extend %}`` directives.
+ :arg bool compress_whitespace: Deprecated since Tornado 4.3. + Equivalent to ``whitespace="single"`` if true and + ``whitespace="all"`` if false. + :arg str autoescape: The name of a function in the template + namespace, or ``None`` to disable escaping by default. + :arg str whitespace: A string specifying treatment of whitespace; + see `filter_whitespace` for options. + + .. versionchanged:: 4.3 + Added ``whitespace`` parameter; deprecated ``compress_whitespace``. + """ + self.name = escape.native_str(name) + + if compress_whitespace is not _UNSET: + # Convert deprecated compress_whitespace (bool) to whitespace (str). + if whitespace is not None: + raise Exception("cannot set both whitespace and compress_whitespace") + whitespace = "single" if compress_whitespace else "all" + if whitespace is None: + if loader and loader.whitespace: + whitespace = loader.whitespace + else: + # Whitespace defaults by filename. + if name.endswith(".html") or name.endswith(".js"): + whitespace = "single" + else: + whitespace = "all" + # Validate the whitespace setting. + filter_whitespace(whitespace, '') + + if autoescape is not _UNSET: + self.autoescape = autoescape + elif loader: + self.autoescape = loader.autoescape + else: + self.autoescape = _DEFAULT_AUTOESCAPE + + self.namespace = loader.namespace if loader else {} + reader = _TemplateReader(name, escape.native_str(template_string), + whitespace) + self.file = _File(self, _parse(reader, self)) + self.code = self._generate_python(loader) + self.loader = loader + try: + # Under python2.5, the fake filename used here must match + # the module name used in __name__ below. + # The dont_inherit flag prevents template.py's future imports + # from being applied to the generated code. + self.compiled = compile( + escape.to_unicode(self.code), + "%s.generated.py" % self.name.replace('.', '_'), + "exec", dont_inherit=True) + except Exception: + formatted_code = _format_code(self.code).rstrip() + app_log.error("%s code:\n%s", self.name, formatted_code) + raise + + def generate(self, **kwargs): + """Generate this template with the given arguments.""" + namespace = { + "escape": escape.xhtml_escape, + "xhtml_escape": escape.xhtml_escape, + "url_escape": escape.url_escape, + "json_encode": escape.json_encode, + "squeeze": escape.squeeze, + "linkify": escape.linkify, + "datetime": datetime, + "_tt_utf8": escape.utf8, # for internal use + "_tt_string_types": (unicode_type, bytes), + # __name__ and __loader__ allow the traceback mechanism to find + # the generated source code. + "__name__": self.name.replace('.', '_'), + "__loader__": ObjectDict(get_source=lambda name: self.code), + } + namespace.update(self.namespace) + namespace.update(kwargs) + exec_in(self.compiled, namespace) + execute = namespace["_tt_execute"] + # Clear the traceback module's cache of source data now that + # we've generated a new template (mainly for this module's + # unittests, where different tests reuse the same name). 
+ linecache.clearcache() + return execute() + + def _generate_python(self, loader): + buffer = StringIO() + try: + # named_blocks maps from names to _NamedBlock objects + named_blocks = {} + ancestors = self._get_ancestors(loader) + ancestors.reverse() + for ancestor in ancestors: + ancestor.find_named_blocks(loader, named_blocks) + writer = _CodeWriter(buffer, named_blocks, loader, + ancestors[0].template) + ancestors[0].generate(writer) + return buffer.getvalue() + finally: + buffer.close() + + def _get_ancestors(self, loader): + ancestors = [self.file] + for chunk in self.file.body.chunks: + if isinstance(chunk, _ExtendsBlock): + if not loader: + raise ParseError("{% extends %} block found, but no " + "template loader") + template = loader.load(chunk.name, self.name) + ancestors.extend(template._get_ancestors(loader)) + return ancestors + + +class BaseLoader(object): + """Base class for template loaders. + + You must use a template loader to use template constructs like + ``{% extends %}`` and ``{% include %}``. The loader caches all + templates after they are loaded the first time. + """ + def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None, + whitespace=None): + """Construct a template loader. + + :arg str autoescape: The name of a function in the template + namespace, such as "xhtml_escape", or ``None`` to disable + autoescaping by default. + :arg dict namespace: A dictionary to be added to the default template + namespace, or ``None``. + :arg str whitespace: A string specifying default behavior for + whitespace in templates; see `filter_whitespace` for options. + Default is "single" for files ending in ".html" and ".js" and + "all" for other files. + + .. versionchanged:: 4.3 + Added ``whitespace`` parameter. + """ + self.autoescape = autoescape + self.namespace = namespace or {} + self.whitespace = whitespace + self.templates = {} + # self.lock protects self.templates. It's a reentrant lock + # because templates may load other templates via `include` or + # `extends`. Note that thanks to the GIL this code would be safe + # even without the lock, but could lead to wasted work as multiple + # threads tried to compile the same template simultaneously. + self.lock = threading.RLock() + + def reset(self): + """Resets the cache of compiled templates.""" + with self.lock: + self.templates = {} + + def resolve_path(self, name, parent_path=None): + """Converts a possibly-relative path to absolute (used internally).""" + raise NotImplementedError() + + def load(self, name, parent_path=None): + """Loads a template.""" + name = self.resolve_path(name, parent_path=parent_path) + with self.lock: + if name not in self.templates: + self.templates[name] = self._create_template(name) + return self.templates[name] + + def _create_template(self, name): + raise NotImplementedError() + + +class Loader(BaseLoader): + """A template loader that loads from a single root directory. 
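+
+    A usage sketch (the directory and file names are the module docstring's
+    examples)::
+
+        loader = Loader("/home/btaylor")
+        print(loader.load("test.html").generate(myvalue="XXX"))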
+ """ + def __init__(self, root_directory, **kwargs): + super(Loader, self).__init__(**kwargs) + self.root = os.path.abspath(root_directory) + + def resolve_path(self, name, parent_path=None): + if parent_path and not parent_path.startswith("<") and \ + not parent_path.startswith("/") and \ + not name.startswith("/"): + current_path = os.path.join(self.root, parent_path) + file_dir = os.path.dirname(os.path.abspath(current_path)) + relative_path = os.path.abspath(os.path.join(file_dir, name)) + if relative_path.startswith(self.root): + name = relative_path[len(self.root) + 1:] + return name + + def _create_template(self, name): + path = os.path.join(self.root, name) + with open(path, "rb") as f: + template = Template(f.read(), name=name, loader=self) + return template + + +class DictLoader(BaseLoader): + """A template loader that loads from a dictionary.""" + def __init__(self, dict, **kwargs): + super(DictLoader, self).__init__(**kwargs) + self.dict = dict + + def resolve_path(self, name, parent_path=None): + if parent_path and not parent_path.startswith("<") and \ + not parent_path.startswith("/") and \ + not name.startswith("/"): + file_dir = posixpath.dirname(parent_path) + name = posixpath.normpath(posixpath.join(file_dir, name)) + return name + + def _create_template(self, name): + return Template(self.dict[name], name=name, loader=self) + + +class _Node(object): + def each_child(self): + return () + + def generate(self, writer): + raise NotImplementedError() + + def find_named_blocks(self, loader, named_blocks): + for child in self.each_child(): + child.find_named_blocks(loader, named_blocks) + + +class _File(_Node): + def __init__(self, template, body): + self.template = template + self.body = body + self.line = 0 + + def generate(self, writer): + writer.write_line("def _tt_execute():", self.line) + with writer.indent(): + writer.write_line("_tt_buffer = []", self.line) + writer.write_line("_tt_append = _tt_buffer.append", self.line) + self.body.generate(writer) + writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line) + + def each_child(self): + return (self.body,) + + +class _ChunkList(_Node): + def __init__(self, chunks): + self.chunks = chunks + + def generate(self, writer): + for chunk in self.chunks: + chunk.generate(writer) + + def each_child(self): + return self.chunks + + +class _NamedBlock(_Node): + def __init__(self, name, body, template, line): + self.name = name + self.body = body + self.template = template + self.line = line + + def each_child(self): + return (self.body,) + + def generate(self, writer): + block = writer.named_blocks[self.name] + with writer.include(block.template, self.line): + block.body.generate(writer) + + def find_named_blocks(self, loader, named_blocks): + named_blocks[self.name] = self + _Node.find_named_blocks(self, loader, named_blocks) + + +class _ExtendsBlock(_Node): + def __init__(self, name): + self.name = name + + +class _IncludeBlock(_Node): + def __init__(self, name, reader, line): + self.name = name + self.template_name = reader.name + self.line = line + + def find_named_blocks(self, loader, named_blocks): + included = loader.load(self.name, self.template_name) + included.file.find_named_blocks(loader, named_blocks) + + def generate(self, writer): + included = writer.loader.load(self.name, self.template_name) + with writer.include(included, self.line): + included.file.body.generate(writer) + + +class _ApplyBlock(_Node): + def __init__(self, method, line, body=None): + self.method = method + self.line = line + self.body = 
body + + def each_child(self): + return (self.body,) + + def generate(self, writer): + method_name = "_tt_apply%d" % writer.apply_counter + writer.apply_counter += 1 + writer.write_line("def %s():" % method_name, self.line) + with writer.indent(): + writer.write_line("_tt_buffer = []", self.line) + writer.write_line("_tt_append = _tt_buffer.append", self.line) + self.body.generate(writer) + writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line) + writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % ( + self.method, method_name), self.line) + + +class _ControlBlock(_Node): + def __init__(self, statement, line, body=None): + self.statement = statement + self.line = line + self.body = body + + def each_child(self): + return (self.body,) + + def generate(self, writer): + writer.write_line("%s:" % self.statement, self.line) + with writer.indent(): + self.body.generate(writer) + # Just in case the body was empty + writer.write_line("pass", self.line) + + +class _IntermediateControlBlock(_Node): + def __init__(self, statement, line): + self.statement = statement + self.line = line + + def generate(self, writer): + # In case the previous block was empty + writer.write_line("pass", self.line) + writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1) + + +class _Statement(_Node): + def __init__(self, statement, line): + self.statement = statement + self.line = line + + def generate(self, writer): + writer.write_line(self.statement, self.line) + + +class _Expression(_Node): + def __init__(self, expression, line, raw=False): + self.expression = expression + self.line = line + self.raw = raw + + def generate(self, writer): + writer.write_line("_tt_tmp = %s" % self.expression, self.line) + writer.write_line("if isinstance(_tt_tmp, _tt_string_types):" + " _tt_tmp = _tt_utf8(_tt_tmp)", self.line) + writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line) + if not self.raw and writer.current_template.autoescape is not None: + # In python3 functions like xhtml_escape return unicode, + # so we have to convert to utf8 again. + writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" % + writer.current_template.autoescape, self.line) + writer.write_line("_tt_append(_tt_tmp)", self.line) + + +class _Module(_Expression): + def __init__(self, expression, line): + super(_Module, self).__init__("_tt_modules." + expression, line, + raw=True) + + +class _Text(_Node): + def __init__(self, value, line, whitespace): + self.value = value + self.line = line + self.whitespace = whitespace + + def generate(self, writer): + value = self.value + + # Compress whitespace if requested, with a crude heuristic to avoid + # altering preformatted whitespace. + if "
    " not in value:
    +            value = filter_whitespace(self.whitespace, value)
    +
    +        if value:
    +            writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
    +
    +
    +class ParseError(Exception):
    +    """Raised for template syntax errors.
    +
    +    ``ParseError`` instances have ``filename`` and ``lineno`` attributes
    +    indicating the position of the error.
    +
    +    .. versionchanged:: 4.3
    +       Added ``filename`` and ``lineno`` attributes.
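+
+    For instance, an unterminated ``{% if %}`` in a one-line unnamed
+    template stringifies as
+    ``Missing {% end %} block for if at <string>:1``.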
    +    """
    +    def __init__(self, message, filename=None, lineno=0):
    +        self.message = message
    +        # The names "filename" and "lineno" are chosen for consistency
    +        # with python SyntaxError.
    +        self.filename = filename
    +        self.lineno = lineno
    +
    +    def __str__(self):
    +        return '%s at %s:%d' % (self.message, self.filename, self.lineno)
    +
    +
    +class _CodeWriter(object):
    +    def __init__(self, file, named_blocks, loader, current_template):
    +        self.file = file
    +        self.named_blocks = named_blocks
    +        self.loader = loader
    +        self.current_template = current_template
    +        self.apply_counter = 0
    +        self.include_stack = []
    +        self._indent = 0
    +
    +    def indent_size(self):
    +        return self._indent
    +
    +    def indent(self):
    +        class Indenter(object):
    +            def __enter__(_):
    +                self._indent += 1
    +                return self
    +
    +            def __exit__(_, *args):
    +                assert self._indent > 0
    +                self._indent -= 1
    +
    +        return Indenter()
    +
    +    def include(self, template, line):
    +        self.include_stack.append((self.current_template, line))
    +        self.current_template = template
    +
    +        class IncludeTemplate(object):
    +            def __enter__(_):
    +                return self
    +
    +            def __exit__(_, *args):
    +                self.current_template = self.include_stack.pop()[0]
    +
    +        return IncludeTemplate()
    +
    +    def write_line(self, line, line_number, indent=None):
    +        if indent is None:
    +            indent = self._indent
    +        line_comment = '  # %s:%d' % (self.current_template.name, line_number)
    +        if self.include_stack:
    +            ancestors = ["%s:%d" % (tmpl.name, lineno)
    +                         for (tmpl, lineno) in self.include_stack]
    +            line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
    +        print("    " * indent + line + line_comment, file=self.file)
    +
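+# Each line written by _CodeWriter.write_line carries a trailing comment
+# mapping it back to the template source, e.g. (hypothetical names):
+#
+#     _tt_append(_tt_tmp)  # page.html:3 (via base.html:1)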
    +
    +class _TemplateReader(object):
    +    def __init__(self, name, text, whitespace):
    +        self.name = name
    +        self.text = text
    +        self.whitespace = whitespace
    +        self.line = 1
    +        self.pos = 0
    +
    +    def find(self, needle, start=0, end=None):
    +        assert start >= 0, start
    +        pos = self.pos
    +        start += pos
    +        if end is None:
    +            index = self.text.find(needle, start)
    +        else:
    +            end += pos
    +            assert end >= start
    +            index = self.text.find(needle, start, end)
    +        if index != -1:
    +            index -= pos
    +        return index
    +
    +    def consume(self, count=None):
    +        if count is None:
    +            count = len(self.text) - self.pos
    +        newpos = self.pos + count
    +        self.line += self.text.count("\n", self.pos, newpos)
    +        s = self.text[self.pos:newpos]
    +        self.pos = newpos
    +        return s
    +
    +    def remaining(self):
    +        return len(self.text) - self.pos
    +
    +    def __len__(self):
    +        return self.remaining()
    +
    +    def __getitem__(self, key):
    +        if type(key) is slice:
    +            size = len(self)
    +            start, stop, step = key.indices(size)
    +            if start is None:
    +                start = self.pos
    +            else:
    +                start += self.pos
    +            if stop is not None:
    +                stop += self.pos
    +            return self.text[slice(start, stop, step)]
    +        elif key < 0:
    +            return self.text[key]
    +        else:
    +            return self.text[self.pos + key]
    +
    +    def __str__(self):
    +        return self.text[self.pos:]
    +
    +    def raise_parse_error(self, msg):
    +        raise ParseError(msg, self.name, self.line)
    +
    +
    +def _format_code(code):
    +    lines = code.splitlines()
    +    format = "%%%dd  %%s\n" % len(repr(len(lines) + 1))
    +    return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
    +
    +
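+# A sketch of the full pipeline these helpers implement: _parse builds a
+# tree of _Node objects, _CodeWriter serializes it to Python, and
+# generate() runs that code. For example (the output is bytes because the
+# generated code utf8-encodes each chunk):
+#
+#     Template("{{ 1 + 1 }}").generate()  # returns b'2'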
    +def _parse(reader, template, in_block=None, in_loop=None):
    +    body = _ChunkList([])
    +    while True:
    +        # Find next template directive
    +        curly = 0
    +        while True:
    +            curly = reader.find("{", curly)
    +            if curly == -1 or curly + 1 == reader.remaining():
    +                # EOF
    +                if in_block:
    +                    reader.raise_parse_error(
    +                        "Missing {%% end %%} block for %s" % in_block)
    +                body.chunks.append(_Text(reader.consume(), reader.line,
    +                                         reader.whitespace))
    +                return body
    +            # If the first curly brace is not the start of a special token,
    +            # start searching from the character after it
    +            if reader[curly + 1] not in ("{", "%", "#"):
    +                curly += 1
    +                continue
    +            # When there are more than 2 curlies in a row, use the
    +            # innermost ones.  This is useful when generating languages
    +            # like latex where curlies are also meaningful
    +            if (curly + 2 < reader.remaining() and
    +                    reader[curly + 1] == '{' and reader[curly + 2] == '{'):
    +                curly += 1
    +                continue
    +            break
    +
    +        # Append any text before the special token
    +        if curly > 0:
    +            cons = reader.consume(curly)
    +            body.chunks.append(_Text(cons, reader.line,
    +                                     reader.whitespace))
    +
    +        start_brace = reader.consume(2)
    +        line = reader.line
    +
    +        # Template directives may be escaped as "{{!" or "{%!".
    +        # In this case output the braces and consume the "!".
    +        # This is especially useful in conjunction with jquery templates,
    +        # which also use double braces.
    +        if reader.remaining() and reader[0] == "!":
    +            reader.consume(1)
    +            body.chunks.append(_Text(start_brace, line,
    +                                     reader.whitespace))
    +            continue
    +
    +        # Comment
    +        if start_brace == "{#":
    +            end = reader.find("#}")
    +            if end == -1:
    +                reader.raise_parse_error("Missing end comment #}")
    +            contents = reader.consume(end).strip()
    +            reader.consume(2)
    +            continue
    +
    +        # Expression
    +        if start_brace == "{{":
    +            end = reader.find("}}")
    +            if end == -1:
    +                reader.raise_parse_error("Missing end expression }}")
    +            contents = reader.consume(end).strip()
    +            reader.consume(2)
    +            if not contents:
    +                reader.raise_parse_error("Empty expression")
    +            body.chunks.append(_Expression(contents, line))
    +            continue
    +
    +        # Block
    +        assert start_brace == "{%", start_brace
    +        end = reader.find("%}")
    +        if end == -1:
    +            reader.raise_parse_error("Missing end block %}")
    +        contents = reader.consume(end).strip()
    +        reader.consume(2)
    +        if not contents:
    +            reader.raise_parse_error("Empty block tag ({% %})")
    +
    +        operator, space, suffix = contents.partition(" ")
    +        suffix = suffix.strip()
    +
    +        # Intermediate ("else", "elif", etc) blocks
    +        intermediate_blocks = {
    +            "else": set(["if", "for", "while", "try"]),
    +            "elif": set(["if"]),
    +            "except": set(["try"]),
    +            "finally": set(["try"]),
    +        }
    +        allowed_parents = intermediate_blocks.get(operator)
    +        if allowed_parents is not None:
    +            if not in_block:
    +                reader.raise_parse_error("%s outside %s block" %
    +                                         (operator, allowed_parents))
    +            if in_block not in allowed_parents:
    +                reader.raise_parse_error(
    +                    "%s block cannot be attached to %s block" %
    +                    (operator, in_block))
    +            body.chunks.append(_IntermediateControlBlock(contents, line))
    +            continue
    +
    +        # End tag
    +        elif operator == "end":
    +            if not in_block:
    +                reader.raise_parse_error("Extra {% end %} block")
    +            return body
    +
    +        elif operator in ("extends", "include", "set", "import", "from",
    +                          "comment", "autoescape", "whitespace", "raw",
    +                          "module"):
    +            if operator == "comment":
    +                continue
    +            if operator == "extends":
    +                suffix = suffix.strip('"').strip("'")
    +                if not suffix:
    +                    reader.raise_parse_error("extends missing file path")
    +                block = _ExtendsBlock(suffix)
    +            elif operator in ("import", "from"):
    +                if not suffix:
    +                    reader.raise_parse_error("import missing statement")
    +                block = _Statement(contents, line)
    +            elif operator == "include":
    +                suffix = suffix.strip('"').strip("'")
    +                if not suffix:
    +                    reader.raise_parse_error("include missing file path")
    +                block = _IncludeBlock(suffix, reader, line)
    +            elif operator == "set":
    +                if not suffix:
    +                    reader.raise_parse_error("set missing statement")
    +                block = _Statement(suffix, line)
    +            elif operator == "autoescape":
    +                fn = suffix.strip()
    +                if fn == "None":
    +                    fn = None
    +                template.autoescape = fn
    +                continue
    +            elif operator == "whitespace":
    +                mode = suffix.strip()
    +                # Validate the selected mode
    +                filter_whitespace(mode, '')
    +                reader.whitespace = mode
    +                continue
    +            elif operator == "raw":
    +                block = _Expression(suffix, line, raw=True)
    +            elif operator == "module":
    +                block = _Module(suffix, line)
    +            body.chunks.append(block)
    +            continue
    +
    +        elif operator in ("apply", "block", "try", "if", "for", "while"):
    +            # parse inner body recursively
    +            if operator in ("for", "while"):
    +                block_body = _parse(reader, template, operator, operator)
    +            elif operator == "apply":
    +                # apply creates a nested function so syntactically it's not
    +                # in the loop.
    +                block_body = _parse(reader, template, operator, None)
    +            else:
    +                block_body = _parse(reader, template, operator, in_loop)
    +
    +            if operator == "apply":
    +                if not suffix:
    +                    reader.raise_parse_error("apply missing method name")
    +                block = _ApplyBlock(suffix, line, block_body)
    +            elif operator == "block":
    +                if not suffix:
    +                    reader.raise_parse_error("block missing name")
    +                block = _NamedBlock(suffix, block_body, template, line)
    +            else:
    +                block = _ControlBlock(contents, line, block_body)
    +            body.chunks.append(block)
    +            continue
    +
    +        elif operator in ("break", "continue"):
    +            if not in_loop:
    +                reader.raise_parse_error("%s outside %s block" %
    +                                         (operator, set(["for", "while"])))
    +            body.chunks.append(_Statement(contents, line))
    +            continue
    +
    +        else:
    +            reader.raise_parse_error("unknown operator: %r" % operator)
    diff --git a/server/www/packages/packages-windows/x86/tornado/testing.py b/server/www/packages/packages-windows/x86/tornado/testing.py
    new file mode 100644
    index 0000000..d6e5e94
    --- /dev/null
    +++ b/server/www/packages/packages-windows/x86/tornado/testing.py
    @@ -0,0 +1,724 @@
    +"""Support classes for automated testing.
    +
    +* `AsyncTestCase` and `AsyncHTTPTestCase`:  Subclasses of unittest.TestCase
    +  with additional support for testing asynchronous (`.IOLoop`-based) code.
    +
    +* `ExpectLog`: Make test logs less spammy.
    +
    +* `main()`: A simple test runner (wrapper around unittest.main()) with support
    +  for the tornado.autoreload module to rerun the tests when code changes.
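+
+  A typical invocation (the test module path is illustrative)::
+
+      python -m tornado.testing tornado.test.runtests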
    +"""
    +
    +from __future__ import absolute_import, division, print_function
    +
    +try:
    +    from tornado import gen
    +    from tornado.httpclient import AsyncHTTPClient
    +    from tornado.httpserver import HTTPServer
    +    from tornado.simple_httpclient import SimpleAsyncHTTPClient
    +    from tornado.ioloop import IOLoop, TimeoutError
    +    from tornado import netutil
    +    from tornado.process import Subprocess
    +except ImportError:
    +    # These modules are not importable on app engine.  Parts of this module
    +    # won't work, but e.g. main() will.
    +    AsyncHTTPClient = None  # type: ignore
    +    gen = None  # type: ignore
    +    HTTPServer = None  # type: ignore
    +    IOLoop = None  # type: ignore
    +    netutil = None  # type: ignore
    +    SimpleAsyncHTTPClient = None  # type: ignore
    +    Subprocess = None  # type: ignore
    +from tornado.log import app_log
    +from tornado.stack_context import ExceptionStackContext
    +from tornado.util import raise_exc_info, basestring_type, PY3
    +import functools
    +import inspect
    +import logging
    +import os
    +import re
    +import signal
    +import socket
    +import sys
    +
    +try:
    +    import asyncio
    +except ImportError:
    +    asyncio = None
    +
    +
    +try:
    +    from collections.abc import Generator as GeneratorType  # type: ignore
    +except ImportError:
    +    from types import GeneratorType  # type: ignore
    +
    +if sys.version_info >= (3, 5):
    +    iscoroutine = inspect.iscoroutine  # type: ignore
    +    iscoroutinefunction = inspect.iscoroutinefunction  # type: ignore
    +else:
    +    iscoroutine = iscoroutinefunction = lambda f: False
    +
    +# Tornado's own test suite requires the updated unittest module
    +# (either py27+ or unittest2) so tornado.test.util enforces
    +# this requirement, but for other users of tornado.testing we want
+# to allow the older version if unittest2 is not available.
    +if PY3:
    +    # On python 3, mixing unittest2 and unittest (including doctest)
    +    # doesn't seem to work, so always use unittest.
    +    import unittest
    +else:
    +    # On python 2, prefer unittest2 when available.
    +    try:
    +        import unittest2 as unittest  # type: ignore
    +    except ImportError:
    +        import unittest  # type: ignore
    +
    +
    +if asyncio is None:
    +    _NON_OWNED_IOLOOPS = ()
    +else:
    +    import tornado.platform.asyncio
    +    _NON_OWNED_IOLOOPS = tornado.platform.asyncio.AsyncIOMainLoop
    +
    +
    +def bind_unused_port(reuse_port=False):
    +    """Binds a server socket to an available port on localhost.
    +
    +    Returns a tuple (socket, port).
    +
    +    .. versionchanged:: 4.4
    +       Always binds to ``127.0.0.1`` without resolving the name
    +       ``localhost``.
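+
+    A usage sketch (``server`` stands in for any object with TCPServer's
+    ``add_socket`` interface)::
+
+        sock, port = bind_unused_port()
+        server.add_socket(sock)
+        # connect test clients to ('127.0.0.1', port)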
    +    """
    +    sock = netutil.bind_sockets(None, '127.0.0.1', family=socket.AF_INET,
    +                                reuse_port=reuse_port)[0]
    +    port = sock.getsockname()[1]
    +    return sock, port
    +
    +
    +def get_async_test_timeout():
    +    """Get the global timeout setting for async tests.
    +
    +    Returns a float, the timeout in seconds.
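+    For example, with ``ASYNC_TEST_TIMEOUT=30`` in the environment this
+    returns ``30.0``; an unset or invalid value falls back to 5 seconds.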
    +
    +    .. versionadded:: 3.1
    +    """
    +    try:
    +        return float(os.environ.get('ASYNC_TEST_TIMEOUT'))
    +    except (ValueError, TypeError):
    +        return 5
    +
    +
    +class _TestMethodWrapper(object):
    +    """Wraps a test method to raise an error if it returns a value.
    +
    +    This is mainly used to detect undecorated generators (if a test
    +    method yields it must use a decorator to consume the generator),
    +    but will also detect other kinds of return values (these are not
    +    necessarily errors, but we alert anyway since there is no good
    +    reason to return a value from a test).
    +    """
    +    def __init__(self, orig_method):
    +        self.orig_method = orig_method
    +
    +    def __call__(self, *args, **kwargs):
    +        result = self.orig_method(*args, **kwargs)
    +        if isinstance(result, GeneratorType) or iscoroutine(result):
    +            raise TypeError("Generator and coroutine test methods should be"
    +                            " decorated with tornado.testing.gen_test")
    +        elif result is not None:
    +            raise ValueError("Return value from test method ignored: %r" %
    +                             result)
    +
    +    def __getattr__(self, name):
    +        """Proxy all unknown attributes to the original method.
    +
    +        This is important for some of the decorators in the `unittest`
    +        module, such as `unittest.skipIf`.
    +        """
    +        return getattr(self.orig_method, name)
    +
    +
    +class AsyncTestCase(unittest.TestCase):
    +    """`~unittest.TestCase` subclass for testing `.IOLoop`-based
    +    asynchronous code.
    +
    +    The unittest framework is synchronous, so the test must be
    +    complete by the time the test method returns. This means that
    +    asynchronous code cannot be used in quite the same way as usual
    +    and must be adapted to fit. To write your tests with coroutines,
    +    decorate your test methods with `tornado.testing.gen_test` instead
    +    of `tornado.gen.coroutine`.
    +
    +    This class also provides the (deprecated) `stop()` and `wait()`
    +    methods for a more manual style of testing. The test method itself
    +    must call ``self.wait()``, and asynchronous callbacks should call
    +    ``self.stop()`` to signal completion.
    +
    +    By default, a new `.IOLoop` is constructed for each test and is available
    +    as ``self.io_loop``.  If the code being tested requires a
    +    global `.IOLoop`, subclasses should override `get_new_ioloop` to return it.
    +
    +    The `.IOLoop`'s ``start`` and ``stop`` methods should not be
+    called directly.  Instead, use `self.stop <AsyncTestCase.stop>` and `self.wait
+    <AsyncTestCase.wait>`.  Arguments passed to ``self.stop`` are returned from
    +    ``self.wait``.  It is possible to have multiple ``wait``/``stop``
    +    cycles in the same test.
    +
    +    Example::
    +
    +        # This test uses coroutine style.
    +        class MyTestCase(AsyncTestCase):
    +            @tornado.testing.gen_test
    +            def test_http_fetch(self):
    +                client = AsyncHTTPClient()
    +                response = yield client.fetch("http://www.tornadoweb.org")
    +                # Test contents of response
    +                self.assertIn("FriendFeed", response.body)
    +
    +        # This test uses argument passing between self.stop and self.wait.
    +        class MyTestCase2(AsyncTestCase):
    +            def test_http_fetch(self):
    +                client = AsyncHTTPClient()
    +                client.fetch("http://www.tornadoweb.org/", self.stop)
    +                response = self.wait()
    +                # Test contents of response
    +                self.assertIn("FriendFeed", response.body)
    +    """
    +    def __init__(self, methodName='runTest'):
    +        super(AsyncTestCase, self).__init__(methodName)
    +        self.__stopped = False
    +        self.__running = False
    +        self.__failure = None
    +        self.__stop_args = None
    +        self.__timeout = None
    +
    +        # It's easy to forget the @gen_test decorator, but if you do
    +        # the test will silently be ignored because nothing will consume
    +        # the generator.  Replace the test method with a wrapper that will
    +        # make sure it's not an undecorated generator.
    +        setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
    +
    +    def setUp(self):
    +        super(AsyncTestCase, self).setUp()
    +        self.io_loop = self.get_new_ioloop()
    +        self.io_loop.make_current()
    +
    +    def tearDown(self):
    +        # Clean up Subprocess, so it can be used again with a new ioloop.
    +        Subprocess.uninitialize()
    +        self.io_loop.clear_current()
    +        if not isinstance(self.io_loop, _NON_OWNED_IOLOOPS):
    +            # Try to clean up any file descriptors left open in the ioloop.
    +            # This avoids leaks, especially when tests are run repeatedly
    +            # in the same process with autoreload (because curl does not
    +            # set FD_CLOEXEC on its file descriptors)
    +            self.io_loop.close(all_fds=True)
    +        super(AsyncTestCase, self).tearDown()
    +        # In case an exception escaped or the StackContext caught an exception
    +        # when there wasn't a wait() to re-raise it, do so here.
    +        # This is our last chance to raise an exception in a way that the
    +        # unittest machinery understands.
    +        self.__rethrow()
    +
    +    def get_new_ioloop(self):
    +        """Returns the `.IOLoop` to use for this test.
    +
    +        By default, a new `.IOLoop` is created for each test.
    +        Subclasses may override this method to return
    +        `.IOLoop.current()` if it is not appropriate to use a new
+        `.IOLoop` in each test (for example, if there are global
    +        singletons using the default `.IOLoop`) or if a per-test event
    +        loop is being provided by another system (such as
    +        ``pytest-asyncio``).
    +        """
    +        return IOLoop()
    +
    +    def _handle_exception(self, typ, value, tb):
    +        if self.__failure is None:
    +            self.__failure = (typ, value, tb)
    +        else:
    +            app_log.error("multiple unhandled exceptions in test",
    +                          exc_info=(typ, value, tb))
    +        self.stop()
    +        return True
    +
    +    def __rethrow(self):
    +        if self.__failure is not None:
    +            failure = self.__failure
    +            self.__failure = None
    +            raise_exc_info(failure)
    +
    +    def run(self, result=None):
    +        with ExceptionStackContext(self._handle_exception, delay_warning=True):
    +            super(AsyncTestCase, self).run(result)
    +        # As a last resort, if an exception escaped super.run() and wasn't
    +        # re-raised in tearDown, raise it here.  This will cause the
    +        # unittest run to fail messily, but that's better than silently
    +        # ignoring an error.
    +        self.__rethrow()
    +
    +    def stop(self, _arg=None, **kwargs):
    +        """Stops the `.IOLoop`, causing one pending (or future) call to `wait()`
    +        to return.
    +
    +        Keyword arguments or a single positional argument passed to `stop()` are
    +        saved and will be returned by `wait()`.
    +
    +        .. deprecated:: 5.1
    +
    +           `stop` and `wait` are deprecated; use ``@gen_test`` instead.
    +        """
    +        assert _arg is None or not kwargs
    +        self.__stop_args = kwargs or _arg
    +        if self.__running:
    +            self.io_loop.stop()
    +            self.__running = False
    +        self.__stopped = True
    +
    +    def wait(self, condition=None, timeout=None):
    +        """Runs the `.IOLoop` until stop is called or timeout has passed.
    +
    +        In the event of a timeout, an exception will be thrown. The
    +        default timeout is 5 seconds; it may be overridden with a
    +        ``timeout`` keyword argument or globally with the
    +        ``ASYNC_TEST_TIMEOUT`` environment variable.
    +
    +        If ``condition`` is not None, the `.IOLoop` will be restarted
    +        after `stop()` until ``condition()`` returns true.
    +
    +        .. versionchanged:: 3.1
    +           Added the ``ASYNC_TEST_TIMEOUT`` environment variable.
    +
    +        .. deprecated:: 5.1
    +
    +           `stop` and `wait` are deprecated; use ``@gen_test`` instead.
    +        """
    +        if timeout is None:
    +            timeout = get_async_test_timeout()
    +
    +        if not self.__stopped:
    +            if timeout:
    +                def timeout_func():
    +                    try:
    +                        raise self.failureException(
    +                            'Async operation timed out after %s seconds' %
    +                            timeout)
    +                    except Exception:
    +                        self.__failure = sys.exc_info()
    +                    self.stop()
    +                self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
    +                                                          timeout_func)
    +            while True:
    +                self.__running = True
    +                self.io_loop.start()
    +                if (self.__failure is not None or
    +                        condition is None or condition()):
    +                    break
    +            if self.__timeout is not None:
    +                self.io_loop.remove_timeout(self.__timeout)
    +                self.__timeout = None
    +        assert self.__stopped
    +        self.__stopped = False
    +        self.__rethrow()
    +        result = self.__stop_args
    +        self.__stop_args = None
    +        return result
    +
    +
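+# Editor's sketch (hypothetical test, not part of Tornado): ``wait`` with a
+# ``condition`` callback restarts the IOLoop after each ``stop()`` until the
+# condition returns true, which is handy when several callbacks must fire.
+class _ExampleConditionTest(AsyncTestCase):
+    def test_three_callbacks(self):
+        calls = []
+
+        def cb():
+            calls.append(1)
+            self.stop()
+
+        for _ in range(3):
+            self.io_loop.add_callback(cb)
+        # The loop is restarted until all three callbacks have run.
+        self.wait(condition=lambda: len(calls) == 3)
+
+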
    +class AsyncHTTPTestCase(AsyncTestCase):
    +    """A test case that starts up an HTTP server.
    +
    +    Subclasses must override `get_app()`, which returns the
    +    `tornado.web.Application` (or other `.HTTPServer` callback) to be tested.
    +    Tests will typically use the provided ``self.http_client`` to fetch
    +    URLs from this server.
    +
    +    Example, assuming the "Hello, world" example from the user guide is in
    +    ``hello.py``::
    +
    +        import hello
    +
    +        class TestHelloApp(AsyncHTTPTestCase):
    +            def get_app(self):
    +                return hello.make_app()
    +
    +            def test_homepage(self):
    +                response = self.fetch('/')
    +                self.assertEqual(response.code, 200)
    +                self.assertEqual(response.body, 'Hello, world')
    +
    +    That call to ``self.fetch()`` is equivalent to ::
    +
    +        self.http_client.fetch(self.get_url('/'), self.stop)
    +        response = self.wait()
    +
    +    which illustrates how AsyncTestCase can turn an asynchronous operation,
    +    like ``http_client.fetch()``, into a synchronous operation. If you need
    +    to do other asynchronous operations in tests, you'll probably need to use
    +    ``stop()`` and ``wait()`` yourself.
    +    """
    +    def setUp(self):
    +        super(AsyncHTTPTestCase, self).setUp()
    +        sock, port = bind_unused_port()
    +        self.__port = port
    +
    +        self.http_client = self.get_http_client()
    +        self._app = self.get_app()
    +        self.http_server = self.get_http_server()
    +        self.http_server.add_sockets([sock])
    +
    +    def get_http_client(self):
    +        return AsyncHTTPClient()
    +
    +    def get_http_server(self):
    +        return HTTPServer(self._app, **self.get_httpserver_options())
    +
    +    def get_app(self):
    +        """Should be overridden by subclasses to return a
    +        `tornado.web.Application` or other `.HTTPServer` callback.
    +        """
    +        raise NotImplementedError()
    +
    +    def fetch(self, path, raise_error=False, **kwargs):
    +        """Convenience method to synchronously fetch a URL.
    +
    +        The given path will be appended to the local server's host and
    +        port.  Any additional kwargs will be passed directly to
    +        `.AsyncHTTPClient.fetch` (and so could be used to pass
    +        ``method="POST"``, ``body="..."``, etc).
    +
    +        If the path begins with http:// or https://, it will be treated as a
    +        full URL and will be fetched as-is.
    +
    +        If ``raise_error`` is True, a `tornado.httpclient.HTTPError` will
    +        be raised if the response code is not 200. This is the same behavior
    +        as the ``raise_error`` argument to `.AsyncHTTPClient.fetch`, but
    +        the default is False here (it's True in `.AsyncHTTPClient`) because
    +        tests often need to deal with non-200 response codes.
    +
    +        .. versionchanged:: 5.0
    +           Added support for absolute URLs.
    +
    +        .. versionchanged:: 5.1
    +
    +           Added the ``raise_error`` argument.
    +
    +        .. deprecated:: 5.1
    +
    +           This method currently turns any exception into an
    +           `.HTTPResponse` with status code 599. In Tornado 6.0,
    +           errors other than `tornado.httpclient.HTTPError` will be
    +           passed through, and ``raise_error=False`` will only
    +           suppress errors that would be raised due to non-200
    +           response codes.
    +
    +        """
    +        if path.lower().startswith(('http://', 'https://')):
    +            url = path
    +        else:
    +            url = self.get_url(path)
    +        return self.io_loop.run_sync(
    +            lambda: self.http_client.fetch(url, raise_error=raise_error, **kwargs),
    +            timeout=get_async_test_timeout())
    +
    +    def get_httpserver_options(self):
    +        """May be overridden by subclasses to return additional
    +        keyword arguments for the server.
    +        """
    +        return {}
    +
    +    def get_http_port(self):
    +        """Returns the port used by the server.
    +
    +        A new port is chosen for each test.
    +        """
    +        return self.__port
    +
    +    def get_protocol(self):
    +        return 'http'
    +
    +    def get_url(self, path):
    +        """Returns an absolute url for the given path on the test server."""
    +        return '%s://127.0.0.1:%s%s' % (self.get_protocol(),
    +                                        self.get_http_port(), path)
    +
    +    def tearDown(self):
    +        self.http_server.stop()
    +        self.io_loop.run_sync(self.http_server.close_all_connections,
    +                              timeout=get_async_test_timeout())
    +        self.http_client.close()
    +        super(AsyncHTTPTestCase, self).tearDown()
    +
    +
    +class AsyncHTTPSTestCase(AsyncHTTPTestCase):
    +    """A test case that starts an HTTPS server.
    +
    +    Interface is generally the same as `AsyncHTTPTestCase`.
    +    """
    +    def get_http_client(self):
    +        return AsyncHTTPClient(force_instance=True,
    +                               defaults=dict(validate_cert=False))
    +
    +    def get_httpserver_options(self):
    +        return dict(ssl_options=self.get_ssl_options())
    +
    +    def get_ssl_options(self):
    +        """May be overridden by subclasses to select SSL options.
    +
    +        By default includes a self-signed testing certificate.
    +        """
    +        # Testing keys were generated with:
    +        # openssl req -new -keyout tornado/test/test.key \
    +        #                     -out tornado/test/test.crt -nodes -days 3650 -x509
    +        module_dir = os.path.dirname(__file__)
    +        return dict(
    +            certfile=os.path.join(module_dir, 'test', 'test.crt'),
    +            keyfile=os.path.join(module_dir, 'test', 'test.key'))
    +
    +    def get_protocol(self):
    +        return 'https'
    +
    +
    +def gen_test(func=None, timeout=None):
    +    """Testing equivalent of ``@gen.coroutine``, to be applied to test methods.
    +
    +    ``@gen.coroutine`` cannot be used on tests because the `.IOLoop` is not
    +    already running.  ``@gen_test`` should be applied to test methods
    +    on subclasses of `AsyncTestCase`.
    +
    +    Example::
    +
    +        class MyTest(AsyncHTTPTestCase):
    +            @gen_test
    +            def test_something(self):
    +                response = yield self.http_client.fetch(self.get_url('/'))
    +
    +    By default, ``@gen_test`` times out after 5 seconds. The timeout may be
    +    overridden globally with the ``ASYNC_TEST_TIMEOUT`` environment variable,
    +    or for each test with the ``timeout`` keyword argument::
    +
    +        class MyTest(AsyncHTTPTestCase):
    +            @gen_test(timeout=10)
    +            def test_something_slow(self):
    +                response = yield self.http_client.fetch(self.get_url('/'))
    +
    +    Note that ``@gen_test`` is incompatible with `AsyncTestCase.stop`,
    +    `AsyncTestCase.wait`, and `AsyncHTTPTestCase.fetch`. Use ``yield
    +    self.http_client.fetch(self.get_url())`` as shown above instead.
    +
    +    .. versionadded:: 3.1
    +       The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment
    +       variable.
    +
    +    .. versionchanged:: 4.0
    +       The wrapper now passes along ``*args, **kwargs`` so it can be used
    +       on functions with arguments.
    +
    +    """
    +    if timeout is None:
    +        timeout = get_async_test_timeout()
    +
    +    def wrap(f):
    +        # Stack up several decorators to allow us to access the generator
    +        # object itself.  In the innermost wrapper, we capture the generator
    +        # and save it in an attribute of self.  Next, we run the wrapped
    +        # function through @gen.coroutine.  Finally, the coroutine is
    +        # wrapped again to make it synchronous with run_sync.
    +        #
    +        # This is a good case study arguing for either some sort of
    +        # extensibility in the gen decorators or cancellation support.
    +        @functools.wraps(f)
    +        def pre_coroutine(self, *args, **kwargs):
    +            result = f(self, *args, **kwargs)
    +            if isinstance(result, GeneratorType) or iscoroutine(result):
    +                self._test_generator = result
    +            else:
    +                self._test_generator = None
    +            return result
    +
    +        if iscoroutinefunction(f):
    +            coro = pre_coroutine
    +        else:
    +            coro = gen.coroutine(pre_coroutine)
    +
    +        @functools.wraps(coro)
    +        def post_coroutine(self, *args, **kwargs):
    +            try:
    +                return self.io_loop.run_sync(
    +                    functools.partial(coro, self, *args, **kwargs),
    +                    timeout=timeout)
    +            except TimeoutError as e:
    +                # run_sync raises an error with an unhelpful traceback.
    +                # If the underlying generator is still running, we can throw the
    +                # exception back into it so the stack trace is replaced by the
    +                # point where the test is stopped. The only reason the generator
    +                # would not be running would be if it were cancelled, which means
    +                # a native coroutine, so we can rely on the cr_running attribute.
    +                if getattr(self._test_generator, 'cr_running', True):
    +                    self._test_generator.throw(e)
    +                    # In case the test contains an overly broad except
    +                    # clause, we may get back here.
    +                # Coroutine was stopped or didn't raise a useful stack trace,
    +                # so re-raise the original exception which is better than nothing.
    +                raise
    +        return post_coroutine
    +
    +    if func is not None:
    +        # Used like:
    +        #     @gen_test
    +        #     def f(self):
    +        #         pass
    +        return wrap(func)
    +    else:
    +        # Used like @gen_test(timeout=10)
    +        return wrap
    +
    +
    +# Without this attribute, nosetests will try to run gen_test as a test
    +# anywhere it is imported.
    +gen_test.__test__ = False  # type: ignore
    +
    +
    +class ExpectLog(logging.Filter):
    +    """Context manager to capture and suppress expected log output.
    +
    +    Useful to make tests of error conditions less noisy, while still
    +    leaving unexpected log entries visible.  *Not thread safe.*
    +
    +    The attribute ``logged_stack`` is set to true if any exception
    +    stack trace was logged.
    +
    +    Usage::
    +
    +        with ExpectLog('tornado.application', "Uncaught exception"):
    +            error_response = self.fetch("/some_page")
    +
    +    .. versionchanged:: 4.3
    +       Added the ``logged_stack`` attribute.
    +    """
    +    def __init__(self, logger, regex, required=True):
    +        """Constructs an ExpectLog context manager.
    +
    +        :param logger: Logger object (or name of logger) to watch.  Pass
    +            an empty string to watch the root logger.
    +        :param regex: Regular expression to match.  Any log entries on
    +            the specified logger that match this regex will be suppressed.
    +        :param required: If true, an exception will be raised if the end of
    +            the ``with`` statement is reached without matching any log entries.
    +        """
    +        if isinstance(logger, basestring_type):
    +            logger = logging.getLogger(logger)
    +        self.logger = logger
    +        self.regex = re.compile(regex)
    +        self.required = required
    +        self.matched = False
    +        self.logged_stack = False
    +
    +    def filter(self, record):
    +        if record.exc_info:
    +            self.logged_stack = True
    +        message = record.getMessage()
    +        if self.regex.match(message):
    +            self.matched = True
    +            return False
    +        return True
    +
    +    def __enter__(self):
    +        self.logger.addFilter(self)
    +        return self
    +
    +    def __exit__(self, typ, value, tb):
    +        self.logger.removeFilter(self)
    +        if not typ and self.required and not self.matched:
    +            raise Exception("did not get expected log message")
    +
    +
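+# Editor's sketch (hypothetical, not part of Tornado): with ``required=False``
+# ExpectLog is purely a suppressor; ``matched`` and ``logged_stack`` can be
+# inspected after the block.
+def _example_expectlog_usage():
+    with ExpectLog(app_log, "boom", required=False) as expect:
+        try:
+            raise ValueError("boom")
+        except ValueError:
+            # Suppressed by the filter; exc_info marks logged_stack.
+            app_log.error("boom", exc_info=True)
+    assert expect.matched and expect.logged_stack
+
+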
    +def main(**kwargs):
    +    """A simple test runner.
    +
    +    This test runner is essentially equivalent to `unittest.main` from
    +    the standard library, but adds support for tornado-style option
    +    parsing and log formatting. It is *not* necessary to use this
    +    `main` function to run tests using `AsyncTestCase`; these tests
    +    are self-contained and can run with any test runner.
    +
    +    The easiest way to run a test is via the command line::
    +
    +        python -m tornado.testing tornado.test.stack_context_test
    +
    +    See the standard library unittest module for ways in which tests can
    +    be specified.
    +
    +    Projects with many tests may wish to define a test script like
    +    ``tornado/test/runtests.py``.  This script should define a method
    +    ``all()`` which returns a test suite and then call
    +    `tornado.testing.main()`.  Note that even when a test script is
    +    used, the ``all()`` test suite may be overridden by naming a
    +    single test on the command line::
    +
    +        # Runs all tests
    +        python -m tornado.test.runtests
    +        # Runs one test
    +        python -m tornado.test.runtests tornado.test.stack_context_test
    +
+    Additional keyword arguments are passed through to ``unittest.main()``.
+    For example, use ``tornado.testing.main(verbosity=2)``
+    to show many test details as they are run.
+    See http://docs.python.org/library/unittest.html#unittest.main
+    for the full argument list.
    +
    +    .. versionchanged:: 5.0
    +
    +       This function produces no output of its own; only that produced
    +       by the `unittest` module (Previously it would add a PASS or FAIL
    +       log message).
    +    """
    +    from tornado.options import define, options, parse_command_line
    +
    +    define('exception_on_interrupt', type=bool, default=True,
    +           help=("If true (default), ctrl-c raises a KeyboardInterrupt "
    +                 "exception.  This prints a stack trace but cannot interrupt "
    +                 "certain operations.  If false, the process is more reliably "
    +                 "killed, but does not print a stack trace."))
    +
    +    # support the same options as unittest's command-line interface
    +    define('verbose', type=bool)
    +    define('quiet', type=bool)
    +    define('failfast', type=bool)
    +    define('catch', type=bool)
    +    define('buffer', type=bool)
    +
    +    argv = [sys.argv[0]] + parse_command_line(sys.argv)
    +
    +    if not options.exception_on_interrupt:
    +        signal.signal(signal.SIGINT, signal.SIG_DFL)
    +
    +    if options.verbose is not None:
    +        kwargs['verbosity'] = 2
    +    if options.quiet is not None:
    +        kwargs['verbosity'] = 0
    +    if options.failfast is not None:
    +        kwargs['failfast'] = True
    +    if options.catch is not None:
    +        kwargs['catchbreak'] = True
    +    if options.buffer is not None:
    +        kwargs['buffer'] = True
    +
    +    if __name__ == '__main__' and len(argv) == 1:
    +        print("No tests specified", file=sys.stderr)
    +        sys.exit(1)
    +    # In order to be able to run tests by their fully-qualified name
    +    # on the command line without importing all tests here,
    +    # module must be set to None.  Python 3.2's unittest.main ignores
    +    # defaultTest if no module is given (it tries to do its own
    +    # test discovery, which is incompatible with auto2to3), so don't
    +    # set module if we're not asking for a specific test.
    +    if len(argv) > 1:
    +        unittest.main(module=None, argv=argv, **kwargs)
    +    else:
    +        unittest.main(defaultTest="all", argv=argv, **kwargs)
    +
    +
    +if __name__ == '__main__':
    +    main()
    diff --git a/server/www/packages/packages-windows/x86/tornado/util.py b/server/www/packages/packages-windows/x86/tornado/util.py
    new file mode 100644
    index 0000000..a42ebeb
    --- /dev/null
    +++ b/server/www/packages/packages-windows/x86/tornado/util.py
    @@ -0,0 +1,497 @@
    +"""Miscellaneous utility functions and classes.
    +
    +This module is used internally by Tornado.  It is not necessarily expected
    +that the functions and classes defined here will be useful to other
    +applications, but they are documented here in case they are.
    +
    +The one public-facing part of this module is the `Configurable` class
    +and its `~Configurable.configure` method, which becomes a part of the
    +interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
    +and `.Resolver`.
    +"""
    +
    +from __future__ import absolute_import, division, print_function
    +
    +import array
    +import atexit
    +import os
    +import re
    +import sys
    +import zlib
    +
    +PY3 = sys.version_info >= (3,)
    +
    +if PY3:
    +    xrange = range
    +
    +# inspect.getargspec() raises DeprecationWarnings in Python 3.5.
    +# The two functions have compatible interfaces for the parts we need.
    +if PY3:
    +    from inspect import getfullargspec as getargspec
    +else:
    +    from inspect import getargspec
    +
    +# Aliases for types that are spelled differently in different Python
    +# versions. bytes_type is deprecated and no longer used in Tornado
    +# itself but is left in case anyone outside Tornado is using it.
    +bytes_type = bytes
    +if PY3:
    +    unicode_type = str
    +    basestring_type = str
    +else:
    +    # The names unicode and basestring don't exist in py3 so silence flake8.
    +    unicode_type = unicode  # noqa
    +    basestring_type = basestring  # noqa
    +
    +
    +try:
    +    import typing  # noqa
    +    from typing import cast
    +
    +    _ObjectDictBase = typing.Dict[str, typing.Any]
    +except ImportError:
    +    _ObjectDictBase = dict
    +
    +    def cast(typ, x):
    +        return x
    +else:
    +    # More imports that are only needed in type comments.
    +    import datetime  # noqa
    +    import types  # noqa
    +    from typing import Any, AnyStr, Union, Optional, Dict, Mapping  # noqa
+    from typing import Tuple, Match, Callable, List  # noqa
    +
    +    if PY3:
    +        _BaseString = str
    +    else:
    +        _BaseString = Union[bytes, unicode_type]
    +
    +
    +try:
    +    from sys import is_finalizing
    +except ImportError:
    +    # Emulate it
    +    def _get_emulated_is_finalizing():
    +        L = []
    +        atexit.register(lambda: L.append(None))
    +
    +        def is_finalizing():
    +            # Not referencing any globals here
    +            return L != []
    +
    +        return is_finalizing
    +
    +    is_finalizing = _get_emulated_is_finalizing()
    +
    +
    +class TimeoutError(Exception):
    +    """Exception raised by `.with_timeout` and `.IOLoop.run_sync`.
    +
+    .. versionchanged:: 5.0
    +       Unified ``tornado.gen.TimeoutError`` and
    +       ``tornado.ioloop.TimeoutError`` as ``tornado.util.TimeoutError``.
    +       Both former names remain as aliases.
    +    """
    +
    +
    +class ObjectDict(_ObjectDictBase):
    +    """Makes a dictionary behave like an object, with attribute-style access.
    +    """
    +    def __getattr__(self, name):
    +        # type: (str) -> Any
    +        try:
    +            return self[name]
    +        except KeyError:
    +            raise AttributeError(name)
    +
    +    def __setattr__(self, name, value):
    +        # type: (str, Any) -> None
    +        self[name] = value
    +
    +
    +class GzipDecompressor(object):
    +    """Streaming gzip decompressor.
    +
+    The interface is like that of `zlib.decompressobj` (without some of the
+    optional arguments), but it understands gzip headers and checksums.
    +    """
    +    def __init__(self):
    +        # Magic parameter makes zlib module understand gzip header
    +        # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
    +        # This works on cpython and pypy, but not jython.
    +        self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)
    +
    +    def decompress(self, value, max_length=None):
    +        # type: (bytes, Optional[int]) -> bytes
    +        """Decompress a chunk, returning newly-available data.
    +
    +        Some data may be buffered for later processing; `flush` must
    +        be called when there is no more input data to ensure that
    +        all data was processed.
    +
    +        If ``max_length`` is given, some input data may be left over
    +        in ``unconsumed_tail``; you must retrieve this value and pass
    +        it back to a future call to `decompress` if it is not empty.
    +        """
    +        return self.decompressobj.decompress(value, max_length)
    +
    +    @property
    +    def unconsumed_tail(self):
    +        # type: () -> bytes
    +        """Returns the unconsumed portion left over
    +        """
    +        return self.decompressobj.unconsumed_tail
    +
    +    def flush(self):
    +        # type: () -> bytes
    +        """Return any remaining buffered data not yet returned by decompress.
    +
    +        Also checks for errors such as truncated input.
    +        No other methods may be called on this object after `flush`.
    +        """
    +        return self.decompressobj.flush()
    +
    +
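+# Editor's sketch (not part of Tornado): decompressing a gzip blob with a
+# bounded output per call, feeding ``unconsumed_tail`` back in as the
+# docstring above requires.  The payload is made up.
+def _example_gzip_stream():
+    comp = zlib.compressobj(9, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
+    blob = comp.compress(b"hello " * 100) + comp.flush()
+    decomp = GzipDecompressor()
+    out = decomp.decompress(blob, 64)
+    while decomp.unconsumed_tail:
+        out += decomp.decompress(decomp.unconsumed_tail, 64)
+    out += decomp.flush()  # also verifies the gzip checksum
+    assert out == b"hello " * 100
+
+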
    +def import_object(name):
    +    # type: (_BaseString) -> Any
    +    """Imports an object by name.
    +
    +    import_object('x') is equivalent to 'import x'.
    +    import_object('x.y.z') is equivalent to 'from x.y import z'.
    +
    +    >>> import tornado.escape
    +    >>> import_object('tornado.escape') is tornado.escape
    +    True
    +    >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
    +    True
    +    >>> import_object('tornado') is tornado
    +    True
    +    >>> import_object('tornado.missing_module')
    +    Traceback (most recent call last):
    +        ...
    +    ImportError: No module named missing_module
    +    """
    +    if not isinstance(name, str):
    +        # on python 2 a byte string is required.
    +        name = name.encode('utf-8')
    +    if name.count('.') == 0:
    +        return __import__(name, None, None)
    +
    +    parts = name.split('.')
    +    obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
    +    try:
    +        return getattr(obj, parts[-1])
    +    except AttributeError:
    +        raise ImportError("No module named %s" % parts[-1])
    +
    +
    +# Stubs to make mypy happy (and later for actual type-checking).
    +def raise_exc_info(exc_info):
    +    # type: (Tuple[type, BaseException, types.TracebackType]) -> None
    +    pass
    +
    +
    +def exec_in(code, glob, loc=None):
    +    # type: (Any, Dict[str, Any], Optional[Mapping[str, Any]]) -> Any
    +    if isinstance(code, basestring_type):
    +        # exec(string) inherits the caller's future imports; compile
    +        # the string first to prevent that.
    +        code = compile(code, '', 'exec', dont_inherit=True)
    +    exec(code, glob, loc)
    +
    +
    +if PY3:
    +    exec("""
    +def raise_exc_info(exc_info):
    +    try:
    +        raise exc_info[1].with_traceback(exc_info[2])
    +    finally:
    +        exc_info = None
    +
    +""")
    +else:
    +    exec("""
    +def raise_exc_info(exc_info):
    +    raise exc_info[0], exc_info[1], exc_info[2]
    +""")
    +
    +
    +def errno_from_exception(e):
    +    # type: (BaseException) -> Optional[int]
    +    """Provides the errno from an Exception object.
    +
+    There are cases where the ``errno`` attribute is not set, so we
+    pull the errno out of ``args``; but if someone instantiates an
+    Exception without any args, indexing into args would fail.  This
+    function abstracts all that behavior to give you a safe way to
+    get the errno.
    +    """
    +
    +    if hasattr(e, 'errno'):
    +        return e.errno  # type: ignore
    +    elif e.args:
    +        return e.args[0]
    +    else:
    +        return None
    +
    +
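+# Editor's sketch (hypothetical calls): both exception styles are handled,
+# and an args-less exception safely yields None.
+def _example_errno():
+    import errno
+    import socket
+    e = socket.error(errno.ECONNRESET, "reset")
+    assert errno_from_exception(e) == errno.ECONNRESET
+    assert errno_from_exception(Exception()) is None
+
+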
    +_alphanum = frozenset(
    +    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
    +
    +
    +def _re_unescape_replacement(match):
    +    # type: (Match[str]) -> str
    +    group = match.group(1)
    +    if group[0] in _alphanum:
    +        raise ValueError("cannot unescape '\\\\%s'" % group[0])
    +    return group
    +
    +
    +_re_unescape_pattern = re.compile(r'\\(.)', re.DOTALL)
    +
    +
    +def re_unescape(s):
    +    # type: (str) -> str
    +    """Unescape a string escaped by `re.escape`.
    +
    +    May raise ``ValueError`` for regular expressions which could not
    +    have been produced by `re.escape` (for example, strings containing
    +    ``\d`` cannot be unescaped).
    +
    +    .. versionadded:: 4.4
    +    """
    +    return _re_unescape_pattern.sub(_re_unescape_replacement, s)
    +
    +
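+# Editor's sketch: ``re_unescape`` round-trips the output of `re.escape`;
+# escapes that `re.escape` never emits (such as ``\\d``) raise ValueError.
+def _example_re_unescape():
+    s = "sum(1+2)"
+    assert re_unescape(re.escape(s)) == s
+
+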
    +class Configurable(object):
    +    """Base class for configurable interfaces.
    +
    +    A configurable interface is an (abstract) class whose constructor
    +    acts as a factory function for one of its implementation subclasses.
    +    The implementation subclass as well as optional keyword arguments to
    +    its initializer can be set globally at runtime with `configure`.
    +
    +    By using the constructor as the factory method, the interface
    +    looks like a normal class, `isinstance` works as usual, etc.  This
    +    pattern is most useful when the choice of implementation is likely
    +    to be a global decision (e.g. when `~select.epoll` is available,
    +    always use it instead of `~select.select`), or when a
    +    previously-monolithic class has been split into specialized
    +    subclasses.
    +
    +    Configurable subclasses must define the class methods
    +    `configurable_base` and `configurable_default`, and use the instance
    +    method `initialize` instead of ``__init__``.
    +
    +    .. versionchanged:: 5.0
    +
    +       It is now possible for configuration to be specified at
    +       multiple levels of a class hierarchy.
    +
    +    """
    +    __impl_class = None  # type: type
    +    __impl_kwargs = None  # type: Dict[str, Any]
    +
    +    def __new__(cls, *args, **kwargs):
    +        base = cls.configurable_base()
    +        init_kwargs = {}
    +        if cls is base:
    +            impl = cls.configured_class()
    +            if base.__impl_kwargs:
    +                init_kwargs.update(base.__impl_kwargs)
    +        else:
    +            impl = cls
    +        init_kwargs.update(kwargs)
    +        if impl.configurable_base() is not base:
    +            # The impl class is itself configurable, so recurse.
    +            return impl(*args, **init_kwargs)
    +        instance = super(Configurable, cls).__new__(impl)
    +        # initialize vs __init__ chosen for compatibility with AsyncHTTPClient
    +        # singleton magic.  If we get rid of that we can switch to __init__
    +        # here too.
    +        instance.initialize(*args, **init_kwargs)
    +        return instance
    +
    +    @classmethod
    +    def configurable_base(cls):
    +        # type: () -> Any
    +        # TODO: This class needs https://github.com/python/typing/issues/107
    +        # to be fully typeable.
    +        """Returns the base class of a configurable hierarchy.
    +
+        This will normally return the class in which it is defined
+        (which is *not* necessarily the same as the ``cls`` classmethod
+        parameter).
    +        """
    +        raise NotImplementedError()
    +
    +    @classmethod
    +    def configurable_default(cls):
    +        # type: () -> type
    +        """Returns the implementation class to be used if none is configured."""
    +        raise NotImplementedError()
    +
    +    def initialize(self):
    +        # type: () -> None
    +        """Initialize a `Configurable` subclass instance.
    +
    +        Configurable classes should use `initialize` instead of ``__init__``.
    +
    +        .. versionchanged:: 4.2
    +           Now accepts positional arguments in addition to keyword arguments.
    +        """
    +
    +    @classmethod
    +    def configure(cls, impl, **kwargs):
    +        # type: (Any, **Any) -> None
    +        """Sets the class to use when the base class is instantiated.
    +
    +        Keyword arguments will be saved and added to the arguments passed
    +        to the constructor.  This can be used to set global defaults for
    +        some parameters.
    +        """
    +        base = cls.configurable_base()
    +        if isinstance(impl, (str, unicode_type)):
    +            impl = import_object(impl)
    +        if impl is not None and not issubclass(impl, cls):
    +            raise ValueError("Invalid subclass of %s" % cls)
    +        base.__impl_class = impl
    +        base.__impl_kwargs = kwargs
    +
    +    @classmethod
    +    def configured_class(cls):
    +        # type: () -> type
    +        """Returns the currently configured class."""
    +        base = cls.configurable_base()
    +        # Manually mangle the private name to see whether this base
    +        # has been configured (and not another base higher in the
    +        # hierarchy).
    +        if base.__dict__.get('_Configurable__impl_class') is None:
    +            base.__impl_class = cls.configurable_default()
    +        return base.__impl_class
    +
    +    @classmethod
    +    def _save_configuration(cls):
    +        # type: () -> Tuple[type, Dict[str, Any]]
    +        base = cls.configurable_base()
    +        return (base.__impl_class, base.__impl_kwargs)
    +
    +    @classmethod
    +    def _restore_configuration(cls, saved):
    +        # type: (Tuple[type, Dict[str, Any]]) -> None
    +        base = cls.configurable_base()
    +        base.__impl_class = saved[0]
    +        base.__impl_kwargs = saved[1]
    +
    +
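+# Editor's sketch (hypothetical classes, not part of Tornado): a minimal
+# Configurable hierarchy.  Instantiating the base yields the configured (or
+# default) implementation, and ``configure`` can inject default kwargs.
+class _ExampleTransport(Configurable):
+    @classmethod
+    def configurable_base(cls):
+        return _ExampleTransport
+
+    @classmethod
+    def configurable_default(cls):
+        return _PlainTransport
+
+    def initialize(self, retries=1):
+        self.retries = retries
+
+
+class _PlainTransport(_ExampleTransport):
+    pass
+
+
+# _ExampleTransport() -> a _PlainTransport with retries=1; after
+# _ExampleTransport.configure(_PlainTransport, retries=5) it gets retries=5.
+
+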
    +class ArgReplacer(object):
    +    """Replaces one value in an ``args, kwargs`` pair.
    +
    +    Inspects the function signature to find an argument by name
    +    whether it is passed by position or keyword.  For use in decorators
    +    and similar wrappers.
    +    """
    +    def __init__(self, func, name):
    +        # type: (Callable, str) -> None
    +        self.name = name
    +        try:
    +            self.arg_pos = self._getargnames(func).index(name)
    +        except ValueError:
    +            # Not a positional parameter
    +            self.arg_pos = None
    +
    +    def _getargnames(self, func):
    +        # type: (Callable) -> List[str]
    +        try:
    +            return getargspec(func).args
    +        except TypeError:
    +            if hasattr(func, 'func_code'):
    +                # Cython-generated code has all the attributes needed
    +                # by inspect.getargspec, but the inspect module only
    +                # works with ordinary functions. Inline the portion of
    +                # getargspec that we need here. Note that for static
    +                # functions the @cython.binding(True) decorator must
    +                # be used (for methods it works out of the box).
    +                code = func.func_code  # type: ignore
    +                return code.co_varnames[:code.co_argcount]
    +            raise
    +
    +    def get_old_value(self, args, kwargs, default=None):
    +        # type: (List[Any], Dict[str, Any], Any) -> Any
    +        """Returns the old value of the named argument without replacing it.
    +
    +        Returns ``default`` if the argument is not present.
    +        """
    +        if self.arg_pos is not None and len(args) > self.arg_pos:
    +            return args[self.arg_pos]
    +        else:
    +            return kwargs.get(self.name, default)
    +
    +    def replace(self, new_value, args, kwargs):
    +        # type: (Any, List[Any], Dict[str, Any]) -> Tuple[Any, List[Any], Dict[str, Any]]
    +        """Replace the named argument in ``args, kwargs`` with ``new_value``.
    +
    +        Returns ``(old_value, args, kwargs)``.  The returned ``args`` and
    +        ``kwargs`` objects may not be the same as the input objects, or
    +        the input objects may be mutated.
    +
    +        If the named argument was not found, ``new_value`` will be added
    +        to ``kwargs`` and None will be returned as ``old_value``.
    +        """
    +        if self.arg_pos is not None and len(args) > self.arg_pos:
    +            # The arg to replace is passed positionally
    +            old_value = args[self.arg_pos]
    +            args = list(args)  # *args is normally a tuple
    +            args[self.arg_pos] = new_value
    +        else:
    +            # The arg to replace is either omitted or passed by keyword.
    +            old_value = kwargs.get(self.name)
    +            kwargs[self.name] = new_value
    +        return old_value, args, kwargs
    +
    +
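+# Editor's sketch (hypothetical function): replacing a named argument no
+# matter whether it was passed positionally or by keyword.
+def _example_arg_replacer():
+    def connect(host, port, timeout=None):
+        return host, port, timeout
+
+    replacer = ArgReplacer(connect, "timeout")
+    # Passed by keyword:
+    old, args, kwargs = replacer.replace(30, ["example.com", 80],
+                                         {"timeout": 10})
+    assert old == 10 and kwargs["timeout"] == 30
+    # Passed positionally:
+    old, args, kwargs = replacer.replace(30, ["example.com", 80, 10], {})
+    assert old == 10 and args[2] == 30
+
+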
    +def timedelta_to_seconds(td):
    +    # type: (datetime.timedelta) -> float
    +    """Equivalent to td.total_seconds() (introduced in python 2.7)."""
    +    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
    +
    +
    +def _websocket_mask_python(mask, data):
    +    # type: (bytes, bytes) -> bytes
    +    """Websocket masking function.
    +
    +    `mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
    +    Returns a `bytes` object of the same length as `data` with the mask applied
    +    as specified in section 5.3 of RFC 6455.
    +
    +    This pure-python implementation may be replaced by an optimized version when available.
    +    """
    +    mask_arr = array.array("B", mask)
    +    unmasked_arr = array.array("B", data)
    +    for i in xrange(len(data)):
    +        unmasked_arr[i] = unmasked_arr[i] ^ mask_arr[i % 4]
    +    if PY3:
    +        # tostring was deprecated in py32.  It hasn't been removed,
    +        # but since we turn on deprecation warnings in our tests
    +        # we need to use the right one.
    +        return unmasked_arr.tobytes()
    +    else:
    +        return unmasked_arr.tostring()
    +
    +
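+# Editor's sketch: masking is a self-inverse XOR (RFC 6455, section 5.3), so
+# applying the same 4-byte mask twice recovers the original payload.
+def _example_mask_roundtrip():
+    mask = b"\x01\x02\x03\x04"
+    payload = b"hello websocket"
+    masked = _websocket_mask_python(mask, payload)
+    assert masked != payload
+    assert _websocket_mask_python(mask, masked) == payload
+
+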
    +if (os.environ.get('TORNADO_NO_EXTENSION') or
    +        os.environ.get('TORNADO_EXTENSION') == '0'):
    +    # These environment variables exist to make it easier to do performance
    +    # comparisons; they are not guaranteed to remain supported in the future.
    +    _websocket_mask = _websocket_mask_python
    +else:
    +    try:
    +        from tornado.speedups import websocket_mask as _websocket_mask
    +    except ImportError:
    +        if os.environ.get('TORNADO_EXTENSION') == '1':
    +            raise
    +        _websocket_mask = _websocket_mask_python
    +
    +
    +def doctests():
    +    import doctest
    +    return doctest.DocTestSuite()
    diff --git a/server/www/packages/packages-windows/x86/tornado/web.py b/server/www/packages/packages-windows/x86/tornado/web.py
    new file mode 100644
    index 0000000..6760b0b
    --- /dev/null
    +++ b/server/www/packages/packages-windows/x86/tornado/web.py
    @@ -0,0 +1,3394 @@
    +#
    +# Copyright 2009 Facebook
    +#
    +# Licensed under the Apache License, Version 2.0 (the "License"); you may
    +# not use this file except in compliance with the License. You may obtain
    +# a copy of the License at
    +#
    +#     http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing, software
    +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    +# License for the specific language governing permissions and limitations
    +# under the License.
    +
    +"""``tornado.web`` provides a simple web framework with asynchronous
    +features that allow it to scale to large numbers of open connections,
+making it ideal for `long polling
+<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
    +
    +Here is a simple "Hello, world" example app:
    +
    +.. testcode::
    +
    +    import tornado.ioloop
    +    import tornado.web
    +
    +    class MainHandler(tornado.web.RequestHandler):
    +        def get(self):
    +            self.write("Hello, world")
    +
    +    if __name__ == "__main__":
    +        application = tornado.web.Application([
    +            (r"/", MainHandler),
    +        ])
    +        application.listen(8888)
    +        tornado.ioloop.IOLoop.current().start()
    +
    +.. testoutput::
    +   :hide:
    +
    +
    +See the :doc:`guide` for additional information.
    +
    +Thread-safety notes
    +-------------------
    +
    +In general, methods on `RequestHandler` and elsewhere in Tornado are
    +not thread-safe. In particular, methods such as
    +`~RequestHandler.write()`, `~RequestHandler.finish()`, and
    +`~RequestHandler.flush()` must only be called from the main thread. If
    +you use multiple threads it is important to use `.IOLoop.add_callback`
    +to transfer control back to the main thread before finishing the
    +request, or to limit your use of other threads to
    +`.IOLoop.run_in_executor` and ensure that your callbacks running in
    +the executor do not refer to Tornado objects.
    +
    +"""
    +
    +from __future__ import absolute_import, division, print_function
    +
    +import base64
    +import binascii
    +import datetime
    +import email.utils
    +import functools
    +import gzip
    +import hashlib
    +import hmac
    +import mimetypes
    +import numbers
    +import os.path
    +import re
    +import stat
    +import sys
    +import threading
    +import time
    +import tornado
    +import traceback
    +import types
    +import warnings
    +from inspect import isclass
    +from io import BytesIO
    +
    +from tornado.concurrent import Future, future_set_result_unless_cancelled
    +from tornado import escape
    +from tornado import gen
    +from tornado import httputil
    +from tornado import iostream
    +from tornado import locale
    +from tornado.log import access_log, app_log, gen_log
    +from tornado import stack_context
    +from tornado import template
    +from tornado.escape import utf8, _unicode
    +from tornado.routing import (AnyMatches, DefaultHostMatches, HostMatches,
    +                             ReversibleRouter, Rule, ReversibleRuleRouter,
    +                             URLSpec)
    +from tornado.util import (ObjectDict, raise_exc_info,
    +                          unicode_type, _websocket_mask, PY3)
    +
    +url = URLSpec
    +
    +if PY3:
    +    import http.cookies as Cookie
    +    import urllib.parse as urlparse
    +    from urllib.parse import urlencode
    +else:
    +    import Cookie
    +    import urlparse
    +    from urllib import urlencode
    +
    +try:
    +    import typing  # noqa
    +
    +    # The following types are accepted by RequestHandler.set_header
    +    # and related methods.
    +    _HeaderTypes = typing.Union[bytes, unicode_type,
    +                                numbers.Integral, datetime.datetime]
    +except ImportError:
    +    pass
    +
    +
    +MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
    +"""The oldest signed value version supported by this version of Tornado.
    +
    +Signed values older than this version cannot be decoded.
    +
    +.. versionadded:: 3.2.1
    +"""
    +
    +MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
    +"""The newest signed value version supported by this version of Tornado.
    +
    +Signed values newer than this version cannot be decoded.
    +
    +.. versionadded:: 3.2.1
    +"""
    +
    +DEFAULT_SIGNED_VALUE_VERSION = 2
    +"""The signed value version produced by `.RequestHandler.create_signed_value`.
    +
    +May be overridden by passing a ``version`` keyword argument.
    +
    +.. versionadded:: 3.2.1
    +"""
    +
    +DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
    +"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
    +
    +May be overridden by passing a ``min_version`` keyword argument.
    +
    +.. versionadded:: 3.2.1
    +"""
    +
    +
    +class RequestHandler(object):
    +    """Base class for HTTP request handlers.
    +
    +    Subclasses must define at least one of the methods defined in the
    +    "Entry points" section below.
    +    """
    +    SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
    +                         "OPTIONS")
    +
    +    _template_loaders = {}  # type: typing.Dict[str, template.BaseLoader]
    +    _template_loader_lock = threading.Lock()
    +    _remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
    +
    +    def __init__(self, application, request, **kwargs):
    +        super(RequestHandler, self).__init__()
    +
    +        self.application = application
    +        self.request = request
    +        self._headers_written = False
    +        self._finished = False
    +        self._auto_finish = True
    +        self._transforms = None  # will be set in _execute
    +        self._prepared_future = None
    +        self._headers = None  # type: httputil.HTTPHeaders
    +        self.path_args = None
    +        self.path_kwargs = None
    +        self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
    +                             application.ui_methods.items())
    +        # UIModules are available as both `modules` and `_tt_modules` in the
    +        # template namespace.  Historically only `modules` was available
    +        # but could be clobbered by user additions to the namespace.
    +        # The template {% module %} directive looks in `_tt_modules` to avoid
    +        # possible conflicts.
    +        self.ui["_tt_modules"] = _UIModuleNamespace(self,
    +                                                    application.ui_modules)
    +        self.ui["modules"] = self.ui["_tt_modules"]
    +        self.clear()
    +        self.request.connection.set_close_callback(self.on_connection_close)
    +        self.initialize(**kwargs)
    +
    +    def initialize(self):
    +        """Hook for subclass initialization. Called for each request.
    +
    +        A dictionary passed as the third argument of a url spec will be
    +        supplied as keyword arguments to initialize().
    +
    +        Example::
    +
    +            class ProfileHandler(RequestHandler):
    +                def initialize(self, database):
    +                    self.database = database
    +
    +                def get(self, username):
    +                    ...
    +
    +            app = Application([
    +                (r'/user/(.*)', ProfileHandler, dict(database=database)),
    +                ])
    +        """
    +        pass
    +
    +    @property
    +    def settings(self):
    +        """An alias for `self.application.settings `."""
    +        return self.application.settings
    +
    +    def head(self, *args, **kwargs):
    +        raise HTTPError(405)
    +
    +    def get(self, *args, **kwargs):
    +        raise HTTPError(405)
    +
    +    def post(self, *args, **kwargs):
    +        raise HTTPError(405)
    +
    +    def delete(self, *args, **kwargs):
    +        raise HTTPError(405)
    +
    +    def patch(self, *args, **kwargs):
    +        raise HTTPError(405)
    +
    +    def put(self, *args, **kwargs):
    +        raise HTTPError(405)
    +
    +    def options(self, *args, **kwargs):
    +        raise HTTPError(405)
    +
    +    def prepare(self):
    +        """Called at the beginning of a request before  `get`/`post`/etc.
    +
    +        Override this method to perform common initialization regardless
    +        of the request method.
    +
    +        Asynchronous support: Decorate this method with `.gen.coroutine`
    +        or use ``async def`` to make it asynchronous (the
    +        `asynchronous` decorator cannot be used on `prepare`).
    +        If this method returns a `.Future` execution will not proceed
    +        until the `.Future` is done.
    +
    +        .. versionadded:: 3.1
    +           Asynchronous support.
    +        """
    +        pass
    +
    +    def on_finish(self):
    +        """Called after the end of a request.
    +
    +        Override this method to perform cleanup, logging, etc.
    +        This method is a counterpart to `prepare`.  ``on_finish`` may
    +        not produce any output, as it is called after the response
    +        has been sent to the client.
    +        """
    +        pass
    +
    +    def on_connection_close(self):
    +        """Called in async handlers if the client closed the connection.
    +
    +        Override this to clean up resources associated with
    +        long-lived connections.  Note that this method is called only if
    +        the connection was closed during asynchronous processing; if you
    +        need to do cleanup after every request override `on_finish`
    +        instead.
    +
    +        Proxies may keep a connection open for a time (perhaps
    +        indefinitely) after the client has gone away, so this method
    +        may not be called promptly after the end user closes their
    +        connection.
    +        """
    +        if _has_stream_request_body(self.__class__):
    +            if not self.request.body.done():
    +                self.request.body.set_exception(iostream.StreamClosedError())
    +                self.request.body.exception()
    +
    +    def clear(self):
    +        """Resets all headers and content for this response."""
    +        self._headers = httputil.HTTPHeaders({
    +            "Server": "TornadoServer/%s" % tornado.version,
    +            "Content-Type": "text/html; charset=UTF-8",
    +            "Date": httputil.format_timestamp(time.time()),
    +        })
    +        self.set_default_headers()
    +        self._write_buffer = []
    +        self._status_code = 200
    +        self._reason = httputil.responses[200]
    +
    +    def set_default_headers(self):
    +        """Override this to set HTTP headers at the beginning of the request.
    +
    +        For example, this is the place to set a custom ``Server`` header.
    +        Note that setting such headers in the normal flow of request
    +        processing may not do what you want, since headers may be reset
    +        during error handling.
    +        """
    +        pass
    +
    +    def set_status(self, status_code, reason=None):
    +        """Sets the status code for our response.
    +
    +        :arg int status_code: Response status code.
    +        :arg str reason: Human-readable reason phrase describing the status
    +            code. If ``None``, it will be filled in from
    +            `http.client.responses` or "Unknown".
    +
    +        .. versionchanged:: 5.0
    +
    +           No longer validates that the response code is in
    +           `http.client.responses`.
    +        """
    +        self._status_code = status_code
    +        if reason is not None:
    +            self._reason = escape.native_str(reason)
    +        else:
    +            self._reason = httputil.responses.get(status_code, "Unknown")
    +
    +    def get_status(self):
    +        """Returns the status code for our response."""
    +        return self._status_code
    +
    +    def set_header(self, name, value):
    +        # type: (str, _HeaderTypes) -> None
    +        """Sets the given response header name and value.
    +
    +        If a datetime is given, we automatically format it according to the
    +        HTTP specification. If the value is not a string, we convert it to
    +        a string. All header values are then encoded as UTF-8.
    +        """
    +        self._headers[name] = self._convert_header_value(value)
    +
    +    def add_header(self, name, value):
    +        # type: (str, _HeaderTypes) -> None
    +        """Adds the given response header and value.
    +
    +        Unlike `set_header`, `add_header` may be called multiple times
    +        to return multiple values for the same header.
    +        """
    +        self._headers.add(name, self._convert_header_value(value))
    +
    +    def clear_header(self, name):
    +        """Clears an outgoing header, undoing a previous `set_header` call.
    +
    +        Note that this method does not apply to multi-valued headers
    +        set by `add_header`.
    +        """
    +        if name in self._headers:
    +            del self._headers[name]
    +
    +    _INVALID_HEADER_CHAR_RE = re.compile(r"[\x00-\x1f]")
    +
    +    def _convert_header_value(self, value):
    +        # type: (_HeaderTypes) -> str
    +
    +        # Convert the input value to a str. This type check is a bit
    +        # subtle: The bytes case only executes on python 3, and the
    +        # unicode case only executes on python 2, because the other
    +        # cases are covered by the first match for str.
    +        if isinstance(value, str):
    +            retval = value
    +        elif isinstance(value, bytes):  # py3
    +            # Non-ascii characters in headers are not well supported,
    +            # but if you pass bytes, use latin1 so they pass through as-is.
    +            retval = value.decode('latin1')
    +        elif isinstance(value, unicode_type):  # py2
    +            # TODO: This is inconsistent with the use of latin1 above,
    +            # but it's been that way for a long time. Should it change?
    +            retval = escape.utf8(value)
    +        elif isinstance(value, numbers.Integral):
    +            # return immediately since we know the converted value will be safe
    +            return str(value)
    +        elif isinstance(value, datetime.datetime):
    +            return httputil.format_timestamp(value)
    +        else:
    +            raise TypeError("Unsupported header value %r" % value)
    +        # If \n is allowed into the header, it is possible to inject
    +        # additional headers or split the request.
    +        if RequestHandler._INVALID_HEADER_CHAR_RE.search(retval):
    +            raise ValueError("Unsafe header value %r", retval)
    +        return retval
    +
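+    # Editor's note (illustrative, not from Tornado docs): values may be str,
+    # bytes, integral, or datetime; anything else raises TypeError, and
+    # control characters raise ValueError to block response splitting, e.g.:
+    #     self.set_header("Expires", datetime.datetime.utcnow())
+    #     self.set_header("X-Count", 42)
+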
    +    _ARG_DEFAULT = object()
    +
    +    def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
    +        """Returns the value of the argument with the given name.
    +
    +        If default is not provided, the argument is considered to be
    +        required, and we raise a `MissingArgumentError` if it is missing.
    +
    +        If the argument appears in the url more than once, we return the
    +        last value.
    +
    +        The returned value is always unicode.
    +        """
    +        return self._get_argument(name, default, self.request.arguments, strip)
    +
    +    def get_arguments(self, name, strip=True):
    +        """Returns a list of the arguments with the given name.
    +
    +        If the argument is not present, returns an empty list.
    +
    +        The returned values are always unicode.
    +        """
    +
    +        # Make sure `get_arguments` isn't accidentally being called with a
    +        # positional argument that's assumed to be a default (like in
    +        # `get_argument`.)
    +        assert isinstance(strip, bool)
    +
    +        return self._get_arguments(name, self.request.arguments, strip)
    +
    +    def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
    +        """Returns the value of the argument with the given name
    +        from the request body.
    +
    +        If default is not provided, the argument is considered to be
    +        required, and we raise a `MissingArgumentError` if it is missing.
    +
    +        If the argument appears in the url more than once, we return the
    +        last value.
    +
    +        The returned value is always unicode.
    +
    +        .. versionadded:: 3.2
    +        """
    +        return self._get_argument(name, default, self.request.body_arguments,
    +                                  strip)
    +
    +    def get_body_arguments(self, name, strip=True):
    +        """Returns a list of the body arguments with the given name.
    +
    +        If the argument is not present, returns an empty list.
    +
    +        The returned values are always unicode.
    +
    +        .. versionadded:: 3.2
    +        """
    +        return self._get_arguments(name, self.request.body_arguments, strip)
    +
    +    def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
    +        """Returns the value of the argument with the given name
    +        from the request query string.
    +
    +        If default is not provided, the argument is considered to be
    +        required, and we raise a `MissingArgumentError` if it is missing.
    +
    +        If the argument appears in the url more than once, we return the
    +        last value.
    +
    +        The returned value is always unicode.
    +
    +        .. versionadded:: 3.2
    +        """
    +        return self._get_argument(name, default,
    +                                  self.request.query_arguments, strip)
    +
    +    def get_query_arguments(self, name, strip=True):
    +        """Returns a list of the query arguments with the given name.
    +
    +        If the argument is not present, returns an empty list.
    +
    +        The returned values are always unicode.
    +
    +        .. versionadded:: 3.2
    +        """
    +        return self._get_arguments(name, self.request.query_arguments, strip)
    +
    +    def _get_argument(self, name, default, source, strip=True):
    +        args = self._get_arguments(name, source, strip=strip)
    +        if not args:
    +            if default is self._ARG_DEFAULT:
    +                raise MissingArgumentError(name)
    +            return default
    +        return args[-1]
    +
    +    def _get_arguments(self, name, source, strip=True):
    +        values = []
    +        for v in source.get(name, []):
    +            v = self.decode_argument(v, name=name)
    +            if isinstance(v, unicode_type):
    +                # Get rid of any weird control chars (unless decoding gave
    +                # us bytes, in which case leave it alone)
    +                v = RequestHandler._remove_control_chars_regex.sub(" ", v)
    +            if strip:
    +                v = v.strip()
    +            values.append(v)
    +        return values
    +
    +    def decode_argument(self, value, name=None):
    +        """Decodes an argument from the request.
    +
    +        The argument has been percent-decoded and is now a byte string.
    +        By default, this method decodes the argument as utf-8 and returns
    +        a unicode string, but this may be overridden in subclasses.
    +
    +        This method is used as a filter for both `get_argument()` and for
    +        values extracted from the url and passed to `get()`/`post()`/etc.
    +
    +        The name of the argument is provided if known, but may be None
    +        (e.g. for unnamed groups in the url regex).
    +        """
    +        try:
    +            return _unicode(value)
    +        except UnicodeDecodeError:
    +            raise HTTPError(400, "Invalid unicode in %s: %r" %
    +                            (name or "url", value[:40]))
    +
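+    # Example (illustrative sketch; the subclass is hypothetical). A handler
+    # expecting legacy form encodings could override the decoding step:
+    #
+    #     class LenientHandler(RequestHandler):
+    #         def decode_argument(self, value, name=None):
+    #             try:
+    #                 return value.decode("utf-8")
+    #             except UnicodeDecodeError:
+    #                 # Fall back to latin1, which accepts any byte value.
+    #                 return value.decode("latin1")
+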
    +    @property
    +    def cookies(self):
    +        """An alias for
    +        `self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
    +        return self.request.cookies
    +
    +    def get_cookie(self, name, default=None):
    +        """Returns the value of the request cookie with the given name.
    +
    +        If the named cookie is not present, returns ``default``.
    +
    +        This method only returns cookies that were present in the request.
    +        It does not see the outgoing cookies set by `set_cookie` in this
    +        handler.
    +        """
    +        if self.request.cookies is not None and name in self.request.cookies:
    +            return self.request.cookies[name].value
    +        return default
    +
    +    def set_cookie(self, name, value, domain=None, expires=None, path="/",
    +                   expires_days=None, **kwargs):
    +        """Sets an outgoing cookie name/value with the given options.
    +
    +        Newly-set cookies are not immediately visible via `get_cookie`;
    +        they are not present until the next request.
    +
    +        expires may be a numeric timestamp as returned by `time.time`,
    +        a time tuple as returned by `time.gmtime`, or a
    +        `datetime.datetime` object.
    +
    +        Additional keyword arguments are set on the cookies.Morsel
    +        directly.
    +        See https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel
    +        for available attributes.
    +        """
    +        # The cookie library only accepts type str, in both python 2 and 3
    +        name = escape.native_str(name)
    +        value = escape.native_str(value)
    +        if re.search(r"[\x00-\x20]", name + value):
    +            # Don't let us accidentally inject bad stuff
    +            raise ValueError("Invalid cookie %r: %r" % (name, value))
    +        if not hasattr(self, "_new_cookie"):
    +            self._new_cookie = Cookie.SimpleCookie()
    +        if name in self._new_cookie:
    +            del self._new_cookie[name]
    +        self._new_cookie[name] = value
    +        morsel = self._new_cookie[name]
    +        if domain:
    +            morsel["domain"] = domain
    +        if expires_days is not None and not expires:
    +            expires = datetime.datetime.utcnow() + datetime.timedelta(
    +                days=expires_days)
    +        if expires:
    +            morsel["expires"] = httputil.format_timestamp(expires)
    +        if path:
    +            morsel["path"] = path
    +        for k, v in kwargs.items():
    +            if k == 'max_age':
    +                k = 'max-age'
    +
    +            # skip falsy values for httponly and secure flags because
    +            # SimpleCookie sets them regardless
    +            if k in ['httponly', 'secure'] and not v:
    +                continue
    +
    +            morsel[k] = v
    +
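+    # Example (illustrative sketch; values are hypothetical, called from
+    # inside a handler method). Extra keyword arguments become Morsel
+    # attributes, so ``httponly``/``secure``/``max_age`` map onto the
+    # corresponding cookie flags:
+    #
+    #     self.set_cookie("session_hint", "abc123", expires_days=7,
+    #                     httponly=True, secure=True)
+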
    +    def clear_cookie(self, name, path="/", domain=None):
    +        """Deletes the cookie with the given name.
    +
    +        Due to limitations of the cookie protocol, you must pass the same
    +        path and domain to clear a cookie as were used when that cookie
    +        was set (but there is no way to find out on the server side
    +        which values were used for a given cookie).
    +
    +        Similar to `set_cookie`, the effect of this method will not be
    +        seen until the following request.
    +        """
    +        expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
    +        self.set_cookie(name, value="", path=path, expires=expires,
    +                        domain=domain)
    +
    +    def clear_all_cookies(self, path="/", domain=None):
    +        """Deletes all the cookies the user sent with this request.
    +
    +        See `clear_cookie` for more information on the path and domain
    +        parameters.
    +
    +        Similar to `set_cookie`, the effect of this method will not be
    +        seen until the following request.
    +
    +        .. versionchanged:: 3.2
    +
    +           Added the ``path`` and ``domain`` parameters.
    +        """
    +        for name in self.request.cookies:
    +            self.clear_cookie(name, path=path, domain=domain)
    +
    +    def set_secure_cookie(self, name, value, expires_days=30, version=None,
    +                          **kwargs):
    +        """Signs and timestamps a cookie so it cannot be forged.
    +
    +        You must specify the ``cookie_secret`` setting in your Application
    +        to use this method. It should be a long, random sequence of bytes
    +        to be used as the HMAC secret for the signature.
    +
    +        To read a cookie set with this method, use `get_secure_cookie()`.
    +
    +        Note that the ``expires_days`` parameter sets the lifetime of the
    +        cookie in the browser, but is independent of the ``max_age_days``
    +        parameter to `get_secure_cookie`.
    +
    +        Secure cookies may contain arbitrary byte values, not just unicode
+        strings (unlike regular cookies).
    +
    +        Similar to `set_cookie`, the effect of this method will not be
    +        seen until the following request.
    +
    +        .. versionchanged:: 3.2.1
    +
    +           Added the ``version`` argument.  Introduced cookie version 2
    +           and made it the default.
    +        """
    +        self.set_cookie(name, self.create_signed_value(name, value,
    +                                                       version=version),
    +                        expires_days=expires_days, **kwargs)
    +
    +    def create_signed_value(self, name, value, version=None):
    +        """Signs and timestamps a string so it cannot be forged.
    +
    +        Normally used via set_secure_cookie, but provided as a separate
    +        method for non-cookie uses.  To decode a value not stored
    +        as a cookie use the optional value argument to get_secure_cookie.
    +
    +        .. versionchanged:: 3.2.1
    +
    +           Added the ``version`` argument.  Introduced cookie version 2
    +           and made it the default.
    +        """
    +        self.require_setting("cookie_secret", "secure cookies")
    +        secret = self.application.settings["cookie_secret"]
    +        key_version = None
    +        if isinstance(secret, dict):
    +            if self.application.settings.get("key_version") is None:
    +                raise Exception("key_version setting must be used for secret_key dicts")
    +            key_version = self.application.settings["key_version"]
    +
    +        return create_signed_value(secret, name, value, version=version,
    +                                   key_version=key_version)
    +
    +    def get_secure_cookie(self, name, value=None, max_age_days=31,
    +                          min_version=None):
    +        """Returns the given signed cookie if it validates, or None.
    +
    +        The decoded cookie value is returned as a byte string (unlike
    +        `get_cookie`).
    +
    +        Similar to `get_cookie`, this method only returns cookies that
    +        were present in the request. It does not see outgoing cookies set by
    +        `set_secure_cookie` in this handler.
    +
    +        .. versionchanged:: 3.2.1
    +
    +           Added the ``min_version`` argument.  Introduced cookie version 2;
    +           both versions 1 and 2 are accepted by default.
    +        """
    +        self.require_setting("cookie_secret", "secure cookies")
    +        if value is None:
    +            value = self.get_cookie(name)
    +        return decode_signed_value(self.application.settings["cookie_secret"],
    +                                   name, value, max_age_days=max_age_days,
    +                                   min_version=min_version)
    +
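+    # Example (illustrative sketch; handler names are hypothetical). A signed
+    # cookie set on one request validates on later ones; the decoded value
+    # comes back as bytes, or None if it is missing, expired, or tampered:
+    #
+    #     class LoginHandler(RequestHandler):
+    #         def post(self):
+    #             self.set_secure_cookie("user", self.get_argument("name"))
+    #
+    #     class MeHandler(RequestHandler):
+    #         def get(self):
+    #             user = self.get_secure_cookie("user", max_age_days=1)
+    #             self.write({"user": user.decode("utf-8") if user else None})
+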
    +    def get_secure_cookie_key_version(self, name, value=None):
    +        """Returns the signing key version of the secure cookie.
    +
    +        The version is returned as int.
    +        """
    +        self.require_setting("cookie_secret", "secure cookies")
    +        if value is None:
    +            value = self.get_cookie(name)
    +        return get_signature_key_version(value)
    +
    +    def redirect(self, url, permanent=False, status=None):
    +        """Sends a redirect to the given (optionally relative) URL.
    +
    +        If the ``status`` argument is specified, that value is used as the
    +        HTTP status code; otherwise either 301 (permanent) or 302
    +        (temporary) is chosen based on the ``permanent`` argument.
    +        The default is 302 (temporary).
    +        """
    +        if self._headers_written:
    +            raise Exception("Cannot redirect after headers have been written")
    +        if status is None:
    +            status = 301 if permanent else 302
    +        else:
    +            assert isinstance(status, int) and 300 <= status <= 399
    +        self.set_status(status)
    +        self.set_header("Location", utf8(url))
    +        self.finish()
    +
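+    # Example (illustrative sketch; the paths are hypothetical). ``redirect``
+    # calls ``finish()``, so nothing more should be written afterwards:
+    #
+    #     class OldPathHandler(RequestHandler):
+    #         def get(self):
+    #             self.redirect("/new-path", permanent=True)  # sends a 301
+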
    +    def write(self, chunk):
    +        """Writes the given chunk to the output buffer.
    +
    +        To write the output to the network, use the flush() method below.
    +
    +        If the given chunk is a dictionary, we write it as JSON and set
    +        the Content-Type of the response to be ``application/json``.
    +        (if you want to send JSON as a different ``Content-Type``, call
    +        set_header *after* calling write()).
    +
    +        Note that lists are not converted to JSON because of a potential
    +        cross-site security vulnerability.  All JSON output should be
    +        wrapped in a dictionary.  More details at
    +        http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
    +        https://github.com/facebook/tornado/issues/1009
    +        """
    +        if self._finished:
    +            raise RuntimeError("Cannot write() after finish()")
    +        if not isinstance(chunk, (bytes, unicode_type, dict)):
    +            message = "write() only accepts bytes, unicode, and dict objects"
    +            if isinstance(chunk, list):
    +                message += ". Lists not accepted for security reasons; see " + \
    +                    "http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
    +            raise TypeError(message)
    +        if isinstance(chunk, dict):
    +            chunk = escape.json_encode(chunk)
    +            self.set_header("Content-Type", "application/json; charset=UTF-8")
    +        chunk = utf8(chunk)
    +        self._write_buffer.append(chunk)
    +
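+    # Example (illustrative sketch, called from inside a handler method).
+    # Writing a dict serializes it as JSON and sets ``Content-Type:
+    # application/json``; wrapping a list in a dict avoids the hijacking
+    # issue described above:
+    #
+    #     self.write({"items": [1, 2, 3]})
+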
    +    def render(self, template_name, **kwargs):
    +        """Renders the template with the given arguments as the response.
    +
    +        ``render()`` calls ``finish()``, so no other output methods can be called
    +        after it.
    +
    +        Returns a `.Future` with the same semantics as the one returned by `finish`.
    +        Awaiting this `.Future` is optional.
    +
    +        .. versionchanged:: 5.1
    +
    +           Now returns a `.Future` instead of ``None``.
    +        """
    +        if self._finished:
    +            raise RuntimeError("Cannot render() after finish()")
    +        html = self.render_string(template_name, **kwargs)
    +
    +        # Insert the additional JS and CSS added by the modules on the page
    +        js_embed = []
    +        js_files = []
    +        css_embed = []
    +        css_files = []
    +        html_heads = []
    +        html_bodies = []
    +        for module in getattr(self, "_active_modules", {}).values():
    +            embed_part = module.embedded_javascript()
    +            if embed_part:
    +                js_embed.append(utf8(embed_part))
    +            file_part = module.javascript_files()
    +            if file_part:
    +                if isinstance(file_part, (unicode_type, bytes)):
    +                    js_files.append(file_part)
    +                else:
    +                    js_files.extend(file_part)
    +            embed_part = module.embedded_css()
    +            if embed_part:
    +                css_embed.append(utf8(embed_part))
    +            file_part = module.css_files()
    +            if file_part:
    +                if isinstance(file_part, (unicode_type, bytes)):
    +                    css_files.append(file_part)
    +                else:
    +                    css_files.extend(file_part)
    +            head_part = module.html_head()
    +            if head_part:
    +                html_heads.append(utf8(head_part))
    +            body_part = module.html_body()
    +            if body_part:
    +                html_bodies.append(utf8(body_part))
    +
    +        if js_files:
    +            # Maintain order of JavaScript files given by modules
    +            js = self.render_linked_js(js_files)
+            sloc = html.rindex(b'</body>')
    +            html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
    +        if js_embed:
    +            js = self.render_embed_js(js_embed)
+            sloc = html.rindex(b'</body>')
    +            html = html[:sloc] + js + b'\n' + html[sloc:]
    +        if css_files:
    +            css = self.render_linked_css(css_files)
+            hloc = html.index(b'</head>')
    +            html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
    +        if css_embed:
    +            css = self.render_embed_css(css_embed)
+            hloc = html.index(b'</head>')
    +            html = html[:hloc] + css + b'\n' + html[hloc:]
    +        if html_heads:
+            hloc = html.index(b'</head>')
    +            html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
    +        if html_bodies:
+            hloc = html.index(b'</body>')
    +            html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
    +        return self.finish(html)
    +
    +    def render_linked_js(self, js_files):
    +        """Default method used to render the final js links for the
    +        rendered webpage.
    +
    +        Override this method in a sub-classed controller to change the output.
    +        """
    +        paths = []
    +        unique_paths = set()
    +
    +        for path in js_files:
    +            if not is_absolute(path):
    +                path = self.static_url(path)
    +            if path not in unique_paths:
    +                paths.append(path)
    +                unique_paths.add(path)
    +
+        return ''.join('<script src="' + escape.xhtml_escape(p) +
+                       '" type="text/javascript"></script>'
+                       for p in paths)
    +
    +    def render_embed_js(self, js_embed):
    +        """Default method used to render the final embedded js for the
    +        rendered webpage.
    +
    +        Override this method in a sub-classed controller to change the output.
    +        """
+        return b'<script type="text/javascript">\n//<![CDATA[\n' + \
+            b'\n'.join(js_embed) + b'\n//]]>\n</script>'
    +
    +    def render_linked_css(self, css_files):
    +        """Default method used to render the final css links for the
    +        rendered webpage.
    +
    +        Override this method in a sub-classed controller to change the output.
    +        """
    +        paths = []
    +        unique_paths = set()
    +
    +        for path in css_files:
    +            if not is_absolute(path):
    +                path = self.static_url(path)
    +            if path not in unique_paths:
    +                paths.append(path)
    +                unique_paths.add(path)
    +
+        return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
+                       'type="text/css" rel="stylesheet"/>'
+                       for p in paths)
    +
    +    def render_embed_css(self, css_embed):
    +        """Default method used to render the final embedded css for the
    +        rendered webpage.
    +
    +        Override this method in a sub-classed controller to change the output.
    +        """
+        return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
+            b'\n</style>'
    +
    +    def render_string(self, template_name, **kwargs):
    +        """Generate the given template with the given arguments.
    +
    +        We return the generated byte string (in utf8). To generate and
    +        write a template as a response, use render() above.
    +        """
    +        # If no template_path is specified, use the path of the calling file
    +        template_path = self.get_template_path()
    +        if not template_path:
    +            frame = sys._getframe(0)
    +            web_file = frame.f_code.co_filename
    +            while frame.f_code.co_filename == web_file:
    +                frame = frame.f_back
    +            template_path = os.path.dirname(frame.f_code.co_filename)
    +        with RequestHandler._template_loader_lock:
    +            if template_path not in RequestHandler._template_loaders:
    +                loader = self.create_template_loader(template_path)
    +                RequestHandler._template_loaders[template_path] = loader
    +            else:
    +                loader = RequestHandler._template_loaders[template_path]
    +        t = loader.load(template_name)
    +        namespace = self.get_template_namespace()
    +        namespace.update(kwargs)
    +        return t.generate(**namespace)
    +
    +    def get_template_namespace(self):
    +        """Returns a dictionary to be used as the default template namespace.
    +
    +        May be overridden by subclasses to add or modify values.
    +
    +        The results of this method will be combined with additional
    +        defaults in the `tornado.template` module and keyword arguments
    +        to `render` or `render_string`.
    +        """
    +        namespace = dict(
    +            handler=self,
    +            request=self.request,
    +            current_user=self.current_user,
    +            locale=self.locale,
    +            _=self.locale.translate,
    +            pgettext=self.locale.pgettext,
    +            static_url=self.static_url,
    +            xsrf_form_html=self.xsrf_form_html,
    +            reverse_url=self.reverse_url
    +        )
    +        namespace.update(self.ui)
    +        return namespace
    +
    +    def create_template_loader(self, template_path):
    +        """Returns a new template loader for the given path.
    +
    +        May be overridden by subclasses.  By default returns a
    +        directory-based loader on the given path, using the
    +        ``autoescape`` and ``template_whitespace`` application
    +        settings.  If a ``template_loader`` application setting is
    +        supplied, uses that instead.
    +        """
    +        settings = self.application.settings
    +        if "template_loader" in settings:
    +            return settings["template_loader"]
    +        kwargs = {}
    +        if "autoescape" in settings:
    +            # autoescape=None means "no escaping", so we have to be sure
    +            # to only pass this kwarg if the user asked for it.
    +            kwargs["autoescape"] = settings["autoescape"]
    +        if "template_whitespace" in settings:
    +            kwargs["whitespace"] = settings["template_whitespace"]
    +        return template.Loader(template_path, **kwargs)
    +
    +    def flush(self, include_footers=False, callback=None):
    +        """Flushes the current output buffer to the network.
    +
    +        The ``callback`` argument, if given, can be used for flow control:
    +        it will be run when all flushed data has been written to the socket.
    +        Note that only one flush callback can be outstanding at a time;
    +        if another flush occurs before the previous flush's callback
    +        has been run, the previous callback will be discarded.
    +
    +        .. versionchanged:: 4.0
    +           Now returns a `.Future` if no callback is given.
    +
    +        .. deprecated:: 5.1
    +
    +           The ``callback`` argument is deprecated and will be removed in
    +           Tornado 6.0.
    +        """
    +        chunk = b"".join(self._write_buffer)
    +        self._write_buffer = []
    +        if not self._headers_written:
    +            self._headers_written = True
    +            for transform in self._transforms:
    +                self._status_code, self._headers, chunk = \
    +                    transform.transform_first_chunk(
    +                        self._status_code, self._headers,
    +                        chunk, include_footers)
    +            # Ignore the chunk and only write the headers for HEAD requests
    +            if self.request.method == "HEAD":
    +                chunk = None
    +
    +            # Finalize the cookie headers (which have been stored in a side
    +            # object so an outgoing cookie could be overwritten before it
    +            # is sent).
    +            if hasattr(self, "_new_cookie"):
    +                for cookie in self._new_cookie.values():
    +                    self.add_header("Set-Cookie", cookie.OutputString(None))
    +
    +            start_line = httputil.ResponseStartLine('',
    +                                                    self._status_code,
    +                                                    self._reason)
    +            return self.request.connection.write_headers(
    +                start_line, self._headers, chunk, callback=callback)
    +        else:
    +            for transform in self._transforms:
    +                chunk = transform.transform_chunk(chunk, include_footers)
    +            # Ignore the chunk and only write the headers for HEAD requests
    +            if self.request.method != "HEAD":
    +                return self.request.connection.write(chunk, callback=callback)
    +            else:
    +                future = Future()
    +                future.set_result(None)
    +                return future
    +
    +    def finish(self, chunk=None):
    +        """Finishes this response, ending the HTTP request.
    +
    +        Passing a ``chunk`` to ``finish()`` is equivalent to passing that
    +        chunk to ``write()`` and then calling ``finish()`` with no arguments.
    +
    +        Returns a `.Future` which may optionally be awaited to track the sending
    +        of the response to the client. This `.Future` resolves when all the response
    +        data has been sent, and raises an error if the connection is closed before all
    +        data can be sent.
    +
    +        .. versionchanged:: 5.1
    +
    +           Now returns a `.Future` instead of ``None``.
    +        """
    +        if self._finished:
    +            raise RuntimeError("finish() called twice")
    +
    +        if chunk is not None:
    +            self.write(chunk)
    +
    +        # Automatically support ETags and add the Content-Length header if
    +        # we have not flushed any content yet.
    +        if not self._headers_written:
    +            if (self._status_code == 200 and
    +                self.request.method in ("GET", "HEAD") and
    +                    "Etag" not in self._headers):
    +                self.set_etag_header()
    +                if self.check_etag_header():
    +                    self._write_buffer = []
    +                    self.set_status(304)
    +            if (self._status_code in (204, 304) or
    +                    (self._status_code >= 100 and self._status_code < 200)):
    +                assert not self._write_buffer, "Cannot send body with %s" % self._status_code
    +                self._clear_headers_for_304()
    +            elif "Content-Length" not in self._headers:
    +                content_length = sum(len(part) for part in self._write_buffer)
    +                self.set_header("Content-Length", content_length)
    +
    +        if hasattr(self.request, "connection"):
    +            # Now that the request is finished, clear the callback we
    +            # set on the HTTPConnection (which would otherwise prevent the
    +            # garbage collection of the RequestHandler when there
    +            # are keepalive connections)
    +            self.request.connection.set_close_callback(None)
    +
    +        future = self.flush(include_footers=True)
    +        self.request.connection.finish()
    +        self._log()
    +        self._finished = True
    +        self.on_finish()
    +        self._break_cycles()
    +        return future
    +
    +    def detach(self):
    +        """Take control of the underlying stream.
    +
    +        Returns the underlying `.IOStream` object and stops all
    +        further HTTP processing. Intended for implementing protocols
    +        like websockets that tunnel over an HTTP handshake.
    +
    +        This method is only supported when HTTP/1.1 is used.
    +
    +        .. versionadded:: 5.1
    +        """
    +        self._finished = True
    +        return self.request.connection.detach()
    +
    +    def _break_cycles(self):
    +        # Break up a reference cycle between this handler and the
    +        # _ui_module closures to allow for faster GC on CPython.
    +        self.ui = None
    +
    +    def send_error(self, status_code=500, **kwargs):
    +        """Sends the given HTTP error code to the browser.
    +
    +        If `flush()` has already been called, it is not possible to send
    +        an error, so this method will simply terminate the response.
    +        If output has been written but not yet flushed, it will be discarded
    +        and replaced with the error page.
    +
    +        Override `write_error()` to customize the error page that is returned.
    +        Additional keyword arguments are passed through to `write_error`.
    +        """
    +        if self._headers_written:
    +            gen_log.error("Cannot send error response after headers written")
    +            if not self._finished:
    +                # If we get an error between writing headers and finishing,
    +                # we are unlikely to be able to finish due to a
    +                # Content-Length mismatch. Try anyway to release the
    +                # socket.
    +                try:
    +                    self.finish()
    +                except Exception:
    +                    gen_log.error("Failed to flush partial response",
    +                                  exc_info=True)
    +            return
    +        self.clear()
    +
    +        reason = kwargs.get('reason')
    +        if 'exc_info' in kwargs:
    +            exception = kwargs['exc_info'][1]
    +            if isinstance(exception, HTTPError) and exception.reason:
    +                reason = exception.reason
    +        self.set_status(status_code, reason=reason)
    +        try:
    +            self.write_error(status_code, **kwargs)
    +        except Exception:
    +            app_log.error("Uncaught exception in write_error", exc_info=True)
    +        if not self._finished:
    +            self.finish()
    +
    +    def write_error(self, status_code, **kwargs):
    +        """Override to implement custom error pages.
    +
    +        ``write_error`` may call `write`, `render`, `set_header`, etc
    +        to produce output as usual.
    +
    +        If this error was caused by an uncaught exception (including
    +        HTTPError), an ``exc_info`` triple will be available as
    +        ``kwargs["exc_info"]``.  Note that this exception may not be
    +        the "current" exception for purposes of methods like
    +        ``sys.exc_info()`` or ``traceback.format_exc``.
    +        """
    +        if self.settings.get("serve_traceback") and "exc_info" in kwargs:
    +            # in debug mode, try to send a traceback
    +            self.set_header('Content-Type', 'text/plain')
    +            for line in traceback.format_exception(*kwargs["exc_info"]):
    +                self.write(line)
    +            self.finish()
    +        else:
    +            self.finish("%(code)d: %(message)s"
    +                        "%(code)d: %(message)s" % {
    +                            "code": status_code,
    +                            "message": self._reason,
    +                        })
    +
    +    @property
    +    def locale(self):
    +        """The locale for the current session.
    +
    +        Determined by either `get_user_locale`, which you can override to
    +        set the locale based on, e.g., a user preference stored in a
    +        database, or `get_browser_locale`, which uses the ``Accept-Language``
    +        header.
    +
+        .. versionchanged:: 4.1
    +           Added a property setter.
    +        """
    +        if not hasattr(self, "_locale"):
    +            self._locale = self.get_user_locale()
    +            if not self._locale:
    +                self._locale = self.get_browser_locale()
    +                assert self._locale
    +        return self._locale
    +
    +    @locale.setter
    +    def locale(self, value):
    +        self._locale = value
    +
    +    def get_user_locale(self):
    +        """Override to determine the locale from the authenticated user.
    +
    +        If None is returned, we fall back to `get_browser_locale()`.
    +
    +        This method should return a `tornado.locale.Locale` object,
    +        most likely obtained via a call like ``tornado.locale.get("en")``
    +        """
    +        return None
    +
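+    # Example (illustrative sketch; the user-object layout is hypothetical):
+    #
+    #     def get_user_locale(self):
+    #         if self.current_user and "locale" in self.current_user:
+    #             return locale.get(self.current_user["locale"])
+    #         return None  # fall back to get_browser_locale()
+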
    +    def get_browser_locale(self, default="en_US"):
    +        """Determines the user's locale from ``Accept-Language`` header.
    +
    +        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
    +        """
    +        if "Accept-Language" in self.request.headers:
    +            languages = self.request.headers["Accept-Language"].split(",")
    +            locales = []
    +            for language in languages:
    +                parts = language.strip().split(";")
    +                if len(parts) > 1 and parts[1].startswith("q="):
    +                    try:
    +                        score = float(parts[1][2:])
    +                    except (ValueError, TypeError):
    +                        score = 0.0
    +                else:
    +                    score = 1.0
    +                locales.append((parts[0], score))
    +            if locales:
    +                locales.sort(key=lambda pair: pair[1], reverse=True)
    +                codes = [l[0] for l in locales]
    +                return locale.get(*codes)
    +        return locale.get(default)
    +
    +    @property
    +    def current_user(self):
    +        """The authenticated user for this request.
    +
    +        This is set in one of two ways:
    +
    +        * A subclass may override `get_current_user()`, which will be called
    +          automatically the first time ``self.current_user`` is accessed.
    +          `get_current_user()` will only be called once per request,
    +          and is cached for future access::
    +
    +              def get_current_user(self):
    +                  user_cookie = self.get_secure_cookie("user")
    +                  if user_cookie:
    +                      return json.loads(user_cookie)
    +                  return None
    +
    +        * It may be set as a normal variable, typically from an overridden
    +          `prepare()`::
    +
    +              @gen.coroutine
    +              def prepare(self):
    +                  user_id_cookie = self.get_secure_cookie("user_id")
    +                  if user_id_cookie:
    +                      self.current_user = yield load_user(user_id_cookie)
    +
    +        Note that `prepare()` may be a coroutine while `get_current_user()`
    +        may not, so the latter form is necessary if loading the user requires
    +        asynchronous operations.
    +
    +        The user object may be any type of the application's choosing.
    +        """
    +        if not hasattr(self, "_current_user"):
    +            self._current_user = self.get_current_user()
    +        return self._current_user
    +
    +    @current_user.setter
    +    def current_user(self, value):
    +        self._current_user = value
    +
    +    def get_current_user(self):
    +        """Override to determine the current user from, e.g., a cookie.
    +
    +        This method may not be a coroutine.
    +        """
    +        return None
    +
    +    def get_login_url(self):
    +        """Override to customize the login URL based on the request.
    +
    +        By default, we use the ``login_url`` application setting.
    +        """
    +        self.require_setting("login_url", "@tornado.web.authenticated")
    +        return self.application.settings["login_url"]
    +
    +    def get_template_path(self):
    +        """Override to customize template path for each handler.
    +
    +        By default, we use the ``template_path`` application setting.
    +        Return None to load templates relative to the calling file.
    +        """
    +        return self.application.settings.get("template_path")
    +
    +    @property
    +    def xsrf_token(self):
    +        """The XSRF-prevention token for the current user/session.
    +
    +        To prevent cross-site request forgery, we set an '_xsrf' cookie
    +        and include the same '_xsrf' value as an argument with all POST
    +        requests. If the two do not match, we reject the form submission
    +        as a potential forgery.
    +
    +        See http://en.wikipedia.org/wiki/Cross-site_request_forgery
    +
    +        This property is of type `bytes`, but it contains only ASCII
    +        characters. If a character string is required, there is no
    +        need to base64-encode it; just decode the byte string as
    +        UTF-8.
    +
    +        .. versionchanged:: 3.2.2
+           The xsrf token will now have a random mask applied in every
    +           request, which makes it safe to include the token in pages
    +           that are compressed.  See http://breachattack.com for more
    +           information on the issue fixed by this change.  Old (version 1)
    +           cookies will be converted to version 2 when this method is called
    +           unless the ``xsrf_cookie_version`` `Application` setting is
    +           set to 1.
    +
    +        .. versionchanged:: 4.3
    +           The ``xsrf_cookie_kwargs`` `Application` setting may be
    +           used to supply additional cookie options (which will be
    +           passed directly to `set_cookie`). For example,
    +           ``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
    +           will set the ``secure`` and ``httponly`` flags on the
    +           ``_xsrf`` cookie.
    +        """
    +        if not hasattr(self, "_xsrf_token"):
    +            version, token, timestamp = self._get_raw_xsrf_token()
    +            output_version = self.settings.get("xsrf_cookie_version", 2)
    +            cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
    +            if output_version == 1:
    +                self._xsrf_token = binascii.b2a_hex(token)
    +            elif output_version == 2:
    +                mask = os.urandom(4)
    +                self._xsrf_token = b"|".join([
    +                    b"2",
    +                    binascii.b2a_hex(mask),
    +                    binascii.b2a_hex(_websocket_mask(mask, token)),
    +                    utf8(str(int(timestamp)))])
    +            else:
    +                raise ValueError("unknown xsrf cookie version %d",
    +                                 output_version)
    +            if version is None:
    +                expires_days = 30 if self.current_user else None
    +                self.set_cookie("_xsrf", self._xsrf_token,
    +                                expires_days=expires_days,
    +                                **cookie_kwargs)
    +        return self._xsrf_token
    +
    +    def _get_raw_xsrf_token(self):
    +        """Read or generate the xsrf token in its raw form.
    +
    +        The raw_xsrf_token is a tuple containing:
    +
    +        * version: the version of the cookie from which this token was read,
    +          or None if we generated a new token in this request.
    +        * token: the raw token data; random (non-ascii) bytes.
    +        * timestamp: the time this token was generated (will not be accurate
    +          for version 1 cookies)
    +        """
    +        if not hasattr(self, '_raw_xsrf_token'):
    +            cookie = self.get_cookie("_xsrf")
    +            if cookie:
    +                version, token, timestamp = self._decode_xsrf_token(cookie)
    +            else:
    +                version, token, timestamp = None, None, None
    +            if token is None:
    +                version = None
    +                token = os.urandom(16)
    +                timestamp = time.time()
    +            self._raw_xsrf_token = (version, token, timestamp)
    +        return self._raw_xsrf_token
    +
    +    def _decode_xsrf_token(self, cookie):
    +        """Convert a cookie string into a the tuple form returned by
    +        _get_raw_xsrf_token.
    +        """
    +
    +        try:
    +            m = _signed_value_version_re.match(utf8(cookie))
    +
    +            if m:
    +                version = int(m.group(1))
    +                if version == 2:
    +                    _, mask, masked_token, timestamp = cookie.split("|")
    +
    +                    mask = binascii.a2b_hex(utf8(mask))
    +                    token = _websocket_mask(
    +                        mask, binascii.a2b_hex(utf8(masked_token)))
    +                    timestamp = int(timestamp)
    +                    return version, token, timestamp
    +                else:
    +                    # Treat unknown versions as not present instead of failing.
    +                    raise Exception("Unknown xsrf cookie version")
    +            else:
    +                version = 1
    +                try:
    +                    token = binascii.a2b_hex(utf8(cookie))
    +                except (binascii.Error, TypeError):
    +                    token = utf8(cookie)
    +                # We don't have a usable timestamp in older versions.
    +                timestamp = int(time.time())
    +                return (version, token, timestamp)
    +        except Exception:
    +            # Catch exceptions and return nothing instead of failing.
    +            gen_log.debug("Uncaught exception in _decode_xsrf_token",
    +                          exc_info=True)
    +            return None, None, None
    +
    +    def check_xsrf_cookie(self):
    +        """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
    +
    +        To prevent cross-site request forgery, we set an ``_xsrf``
    +        cookie and include the same value as a non-cookie
    +        field with all ``POST`` requests. If the two do not match, we
    +        reject the form submission as a potential forgery.
    +
    +        The ``_xsrf`` value may be set as either a form field named ``_xsrf``
    +        or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
    +        (the latter is accepted for compatibility with Django).
    +
    +        See http://en.wikipedia.org/wiki/Cross-site_request_forgery
    +
    +        Prior to release 1.1.1, this check was ignored if the HTTP header
    +        ``X-Requested-With: XMLHTTPRequest`` was present.  This exception
    +        has been shown to be insecure and has been removed.  For more
    +        information please see
    +        http://www.djangoproject.com/weblog/2011/feb/08/security/
    +        http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
    +
    +        .. versionchanged:: 3.2.2
    +           Added support for cookie version 2.  Both versions 1 and 2 are
    +           supported.
    +        """
    +        token = (self.get_argument("_xsrf", None) or
    +                 self.request.headers.get("X-Xsrftoken") or
    +                 self.request.headers.get("X-Csrftoken"))
    +        if not token:
    +            raise HTTPError(403, "'_xsrf' argument missing from POST")
    +        _, token, _ = self._decode_xsrf_token(token)
    +        _, expected_token, _ = self._get_raw_xsrf_token()
    +        if not token:
    +            raise HTTPError(403, "'_xsrf' argument has invalid format")
    +        if not _time_independent_equals(utf8(token), utf8(expected_token)):
    +            raise HTTPError(403, "XSRF cookie does not match POST argument")
    +
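+    # Example (illustrative sketch; the settings shown are abbreviated).
+    # XSRF checking is enabled application-wide via the ``xsrf_cookies``
+    # setting; every non-GET/HEAD/OPTIONS request must then carry the token:
+    #
+    #     app = Application(handlers, cookie_secret="...", xsrf_cookies=True)
+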
    +    def xsrf_form_html(self):
    +        """An HTML ```` element to be included with all POST forms.
    +
    +        It defines the ``_xsrf`` input value, which we check on all POST
    +        requests to prevent cross-site request forgery. If you have set
    +        the ``xsrf_cookies`` application setting, you must include this
    +        HTML within all of your HTML forms.
    +
    +        In a template, this method should be called with ``{% module
    +        xsrf_form_html() %}``
    +
    +        See `check_xsrf_cookie()` above for more information.
    +        """
+        return '<input type="hidden" name="_xsrf" value="' + \
+            escape.xhtml_escape(self.xsrf_token) + '"/>'
    +
    +    def static_url(self, path, include_host=None, **kwargs):
    +        """Returns a static URL for the given relative static file path.
    +
    +        This method requires you set the ``static_path`` setting in your
    +        application (which specifies the root directory of your static
    +        files).
    +
    +        This method returns a versioned url (by default appending
+        ``?v=<signature>``), which allows the static files to be
    +        cached indefinitely.  This can be disabled by passing
    +        ``include_version=False`` (in the default implementation;
    +        other static file implementations are not required to support
    +        this, but they may support other options).
    +
    +        By default this method returns URLs relative to the current
    +        host, but if ``include_host`` is true the URL returned will be
    +        absolute.  If this handler has an ``include_host`` attribute,
    +        that value will be used as the default for all `static_url`
    +        calls that do not pass ``include_host`` as a keyword argument.
    +
    +        """
    +        self.require_setting("static_path", "static_url")
    +        get_url = self.settings.get("static_handler_class",
    +                                    StaticFileHandler).make_static_url
    +
    +        if include_host is None:
    +            include_host = getattr(self, "include_host", False)
    +
    +        if include_host:
    +            base = self.request.protocol + "://" + self.request.host
    +        else:
    +            base = ""
    +
    +        return base + get_url(self.settings, path, **kwargs)
    +
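+    # Example (illustrative sketch; the file path is hypothetical). In a
+    # template this renders a versioned URL such as
+    # ``/static/css/site.css?v=<signature>``:
+    #
+    #     <link rel="stylesheet" href="{{ static_url('css/site.css') }}">
+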
    +    def require_setting(self, name, feature="this feature"):
    +        """Raises an exception if the given app setting is not defined."""
    +        if not self.application.settings.get(name):
    +            raise Exception("You must define the '%s' setting in your "
    +                            "application to use %s" % (name, feature))
    +
    +    def reverse_url(self, name, *args):
    +        """Alias for `Application.reverse_url`."""
    +        return self.application.reverse_url(name, *args)
    +
    +    def compute_etag(self):
    +        """Computes the etag header to be used for this request.
    +
    +        By default uses a hash of the content written so far.
    +
    +        May be overridden to provide custom etag implementations,
    +        or may return None to disable tornado's default etag support.
    +        """
    +        hasher = hashlib.sha1()
    +        for part in self._write_buffer:
    +            hasher.update(part)
    +        return '"%s"' % hasher.hexdigest()
    +
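+    # Example (illustrative sketch; the subclass is hypothetical). Returning
+    # None opts a handler out of the default etag support:
+    #
+    #     class NoEtagHandler(RequestHandler):
+    #         def compute_etag(self):
+    #             return None
+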
    +    def set_etag_header(self):
    +        """Sets the response's Etag header using ``self.compute_etag()``.
    +
    +        Note: no header will be set if ``compute_etag()`` returns ``None``.
    +
    +        This method is called automatically when the request is finished.
    +        """
    +        etag = self.compute_etag()
    +        if etag is not None:
    +            self.set_header("Etag", etag)
    +
    +    def check_etag_header(self):
    +        """Checks the ``Etag`` header against requests's ``If-None-Match``.
    +
    +        Returns ``True`` if the request's Etag matches and a 304 should be
    +        returned. For example::
    +
    +            self.set_etag_header()
    +            if self.check_etag_header():
    +                self.set_status(304)
    +                return
    +
    +        This method is called automatically when the request is finished,
    +        but may be called earlier for applications that override
    +        `compute_etag` and want to do an early check for ``If-None-Match``
    +        before completing the request.  The ``Etag`` header should be set
    +        (perhaps with `set_etag_header`) before calling this method.
    +        """
    +        computed_etag = utf8(self._headers.get("Etag", ""))
    +        # Find all weak and strong etag values from If-None-Match header
    +        # because RFC 7232 allows multiple etag values in a single header.
    +        etags = re.findall(
    +            br'\*|(?:W/)?"[^"]*"',
    +            utf8(self.request.headers.get("If-None-Match", ""))
    +        )
    +        if not computed_etag or not etags:
    +            return False
    +
    +        match = False
    +        if etags[0] == b'*':
    +            match = True
    +        else:
    +            # Use a weak comparison when comparing entity-tags.
    +            def val(x):
    +                return x[2:] if x.startswith(b'W/') else x
    +
    +            for etag in etags:
    +                if val(etag) == val(computed_etag):
    +                    match = True
    +                    break
    +        return match
    +
    +    def _stack_context_handle_exception(self, type, value, traceback):
    +        try:
    +            # For historical reasons _handle_request_exception only takes
    +            # the exception value instead of the full triple,
    +            # so re-raise the exception to ensure that it's in
    +            # sys.exc_info()
    +            raise_exc_info((type, value, traceback))
    +        except Exception:
    +            self._handle_request_exception(value)
    +        return True
    +
    +    @gen.coroutine
    +    def _execute(self, transforms, *args, **kwargs):
    +        """Executes this request with the given output transforms."""
    +        self._transforms = transforms
    +        try:
    +            if self.request.method not in self.SUPPORTED_METHODS:
    +                raise HTTPError(405)
    +            self.path_args = [self.decode_argument(arg) for arg in args]
    +            self.path_kwargs = dict((k, self.decode_argument(v, name=k))
    +                                    for (k, v) in kwargs.items())
    +            # If XSRF cookies are turned on, reject form submissions without
    +            # the proper cookie
    +            if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
    +                    self.application.settings.get("xsrf_cookies"):
    +                self.check_xsrf_cookie()
    +
    +            result = self.prepare()
    +            if result is not None:
    +                result = yield result
    +            if self._prepared_future is not None:
    +                # Tell the Application we've finished with prepare()
    +                # and are ready for the body to arrive.
    +                future_set_result_unless_cancelled(self._prepared_future, None)
    +            if self._finished:
    +                return
    +
    +            if _has_stream_request_body(self.__class__):
    +                # In streaming mode request.body is a Future that signals
    +                # the body has been completely received.  The Future has no
    +                # result; the data has been passed to self.data_received
    +                # instead.
    +                try:
    +                    yield self.request.body
    +                except iostream.StreamClosedError:
    +                    return
    +
    +            method = getattr(self, self.request.method.lower())
    +            result = method(*self.path_args, **self.path_kwargs)
    +            if result is not None:
    +                result = yield result
    +            if self._auto_finish and not self._finished:
    +                self.finish()
    +        except Exception as e:
    +            try:
    +                self._handle_request_exception(e)
    +            except Exception:
    +                app_log.error("Exception in exception handler", exc_info=True)
    +            finally:
    +                # Unset result to avoid circular references
    +                result = None
    +            if (self._prepared_future is not None and
    +                    not self._prepared_future.done()):
    +                # In case we failed before setting _prepared_future, do it
    +                # now (to unblock the HTTP server).  Note that this is not
    +                # in a finally block to avoid GC issues prior to Python 3.4.
    +                self._prepared_future.set_result(None)
    +
    +    def data_received(self, chunk):
    +        """Implement this method to handle streamed request data.
    +
    +        Requires the `.stream_request_body` decorator.
    +        """
    +        raise NotImplementedError()
    +
    +    def _log(self):
    +        """Logs the current request.
    +
    +        Sort of deprecated since this functionality was moved to the
    +        Application, but left in place for the benefit of existing apps
    +        that have overridden this method.
    +        """
    +        self.application.log_request(self)
    +
    +    def _request_summary(self):
    +        return "%s %s (%s)" % (self.request.method, self.request.uri,
    +                               self.request.remote_ip)
    +
    +    def _handle_request_exception(self, e):
    +        if isinstance(e, Finish):
    +            # Not an error; just finish the request without logging.
    +            if not self._finished:
    +                self.finish(*e.args)
    +            return
    +        try:
    +            self.log_exception(*sys.exc_info())
    +        except Exception:
    +            # An error here should still get a best-effort send_error()
    +            # to avoid leaking the connection.
    +            app_log.error("Error in exception logger", exc_info=True)
    +        if self._finished:
    +            # Extra errors after the request has been finished should
    +            # be logged, but there is no reason to continue to try and
    +            # send a response.
    +            return
    +        if isinstance(e, HTTPError):
    +            self.send_error(e.status_code, exc_info=sys.exc_info())
    +        else:
    +            self.send_error(500, exc_info=sys.exc_info())
    +
    +    def log_exception(self, typ, value, tb):
    +        """Override to customize logging of uncaught exceptions.
    +
    +        By default logs instances of `HTTPError` as warnings without
    +        stack traces (on the ``tornado.general`` logger), and all
    +        other exceptions as errors with stack traces (on the
    +        ``tornado.application`` logger).
    +
    +        .. versionadded:: 3.1
    +        """
    +        if isinstance(value, HTTPError):
    +            if value.log_message:
    +                format = "%d %s: " + value.log_message
    +                args = ([value.status_code, self._request_summary()] +
    +                        list(value.args))
    +                gen_log.warning(format, *args)
    +        else:
    +            app_log.error("Uncaught exception %s\n%r", self._request_summary(),
    +                          self.request, exc_info=(typ, value, tb))
    +
    +    def _ui_module(self, name, module):
    +        def render(*args, **kwargs):
    +            if not hasattr(self, "_active_modules"):
    +                self._active_modules = {}
    +            if name not in self._active_modules:
    +                self._active_modules[name] = module(self)
    +            rendered = self._active_modules[name].render(*args, **kwargs)
    +            return rendered
    +        return render
    +
    +    def _ui_method(self, method):
    +        return lambda *args, **kwargs: method(self, *args, **kwargs)
    +
    +    def _clear_headers_for_304(self):
    +        # 304 responses should not contain entity headers (defined in
    +        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
+        # that are not explicitly allowed by
    +        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
    +        headers = ["Allow", "Content-Encoding", "Content-Language",
    +                   "Content-Length", "Content-MD5", "Content-Range",
    +                   "Content-Type", "Last-Modified"]
    +        for h in headers:
    +            self.clear_header(h)
    +
    +
    +def asynchronous(method):
    +    """Wrap request handler methods with this if they are asynchronous.
    +
    +    This decorator is for callback-style asynchronous methods; for
    +    coroutines, use the ``@gen.coroutine`` decorator without
    +    ``@asynchronous``. (It is legal for legacy reasons to use the two
    +    decorators together provided ``@asynchronous`` is first, but
    +    ``@asynchronous`` will be ignored in this case)
    +
+    This decorator should only be applied to the :ref:`HTTP verb
+    methods <verbs>`; its behavior is undefined for any other method.
    +    This decorator does not *make* a method asynchronous; it tells
    +    the framework that the method *is* asynchronous.  For this decorator
    +    to be useful the method must (at least sometimes) do something
    +    asynchronous.
    +
    +    If this decorator is given, the response is not finished when the
    +    method returns. It is up to the request handler to call
+    `self.finish() <RequestHandler.finish>` to finish the HTTP
    +    request. Without this decorator, the request is automatically
    +    finished when the ``get()`` or ``post()`` method returns. Example:
    +
    +    .. testcode::
    +
    +       class MyRequestHandler(RequestHandler):
    +           @asynchronous
    +           def get(self):
    +              http = httpclient.AsyncHTTPClient()
    +              http.fetch("http://friendfeed.com/", self._on_download)
    +
    +           def _on_download(self, response):
    +              self.write("Downloaded!")
    +              self.finish()
    +
    +    .. testoutput::
    +       :hide:
    +
    +    .. versionchanged:: 3.1
    +       The ability to use ``@gen.coroutine`` without ``@asynchronous``.
    +
    +    .. versionchanged:: 4.3 Returning anything but ``None`` or a
    +       yieldable object from a method decorated with ``@asynchronous``
    +       is an error. Such return values were previously ignored silently.
    +
    +    .. deprecated:: 5.1
    +
    +       This decorator is deprecated and will be removed in Tornado 6.0.
    +       Use coroutines instead.
    +    """
    +    warnings.warn("@asynchronous is deprecated, use coroutines instead",
    +                  DeprecationWarning)
    +    # Delay the IOLoop import because it's not available on app engine.
    +    from tornado.ioloop import IOLoop
    +
    +    @functools.wraps(method)
    +    def wrapper(self, *args, **kwargs):
    +        self._auto_finish = False
    +        with stack_context.ExceptionStackContext(
    +                self._stack_context_handle_exception, delay_warning=True):
    +            result = method(self, *args, **kwargs)
    +            if result is not None:
    +                result = gen.convert_yielded(result)
    +
    +                # If @asynchronous is used with @gen.coroutine, (but
    +                # not @gen.engine), we can automatically finish the
    +                # request when the future resolves.  Additionally,
    +                # the Future will swallow any exceptions so we need
    +                # to throw them back out to the stack context to finish
    +                # the request.
    +                def future_complete(f):
    +                    f.result()
    +                    if not self._finished:
    +                        self.finish()
    +                IOLoop.current().add_future(result, future_complete)
    +                # Once we have done this, hide the Future from our
    +                # caller (i.e. RequestHandler._when_complete), which
    +                # would otherwise set up its own callback and
    +                # exception handler (resulting in exceptions being
    +                # logged twice).
    +                return None
    +            return result
    +    return wrapper
    +
    +
    +def stream_request_body(cls):
    +    """Apply to `RequestHandler` subclasses to enable streaming body support.
    +
    +    This decorator implies the following changes:
    +
    +    * `.HTTPServerRequest.body` is undefined, and body arguments will not
    +      be included in `RequestHandler.get_argument`.
    +    * `RequestHandler.prepare` is called when the request headers have been
    +      read instead of after the entire body has been read.
    +    * The subclass must define a method ``data_received(self, data):``, which
    +      will be called zero or more times as data is available.  Note that
    +      if the request has an empty body, ``data_received`` may not be called.
+    * ``prepare`` and ``data_received`` may return Futures (such as via
+      ``@gen.coroutine``), in which case the next method will not be called
+      until those futures have completed.
    +    * The regular HTTP method (``post``, ``put``, etc) will be called after
    +      the entire body has been read.
    +
+    See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
    +    for example usage.
    +    """  # noqa: E501
    +    if not issubclass(cls, RequestHandler):
+        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    +    cls._stream_request_body = True
    +    return cls
    +
    +
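+# Editor's note: a minimal, hedged sketch (not part of Tornado) showing how
+# ``@stream_request_body`` is typically wired up; the class name and target
+# path are illustrative only.
+def _example_streaming_upload():
+    @stream_request_body
+    class UploadHandler(RequestHandler):
+        def prepare(self):
+            # Called as soon as the headers are read, before any body data.
+            self._file = open("/tmp/upload.bin", "wb")
+
+        def data_received(self, chunk):
+            # Called zero or more times as body chunks arrive.
+            self._file.write(chunk)
+
+        def post(self):
+            # Called once after the entire body has been received.
+            self._file.close()
+            self.write("upload complete")
+
+    return [(r"/upload", UploadHandler)]
+
+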
    +def _has_stream_request_body(cls):
    +    if not issubclass(cls, RequestHandler):
+        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    +    return getattr(cls, '_stream_request_body', False)
    +
    +
    +def removeslash(method):
    +    """Use this decorator to remove trailing slashes from the request path.
    +
    +    For example, a request to ``/foo/`` would redirect to ``/foo`` with this
    +    decorator. Your request handler mapping should use a regular expression
    +    like ``r'/foo/*'`` in conjunction with using the decorator.
    +    """
    +    @functools.wraps(method)
    +    def wrapper(self, *args, **kwargs):
    +        if self.request.path.endswith("/"):
    +            if self.request.method in ("GET", "HEAD"):
    +                uri = self.request.path.rstrip("/")
    +                if uri:  # don't try to redirect '/' to ''
    +                    if self.request.query:
    +                        uri += "?" + self.request.query
    +                    self.redirect(uri, permanent=True)
    +                    return
    +            else:
    +                raise HTTPError(404)
    +        return method(self, *args, **kwargs)
    +    return wrapper
    +
    +
    +def addslash(method):
    +    """Use this decorator to add a missing trailing slash to the request path.
    +
    +    For example, a request to ``/foo`` would redirect to ``/foo/`` with this
    +    decorator. Your request handler mapping should use a regular expression
    +    like ``r'/foo/?'`` in conjunction with using the decorator.
    +    """
    +    @functools.wraps(method)
    +    def wrapper(self, *args, **kwargs):
    +        if not self.request.path.endswith("/"):
    +            if self.request.method in ("GET", "HEAD"):
    +                uri = self.request.path + "/"
    +                if self.request.query:
    +                    uri += "?" + self.request.query
    +                self.redirect(uri, permanent=True)
    +                return
    +            raise HTTPError(404)
    +        return method(self, *args, **kwargs)
    +    return wrapper
    +
    +
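+# Editor's note: a hedged usage sketch (not part of Tornado) pairing each
+# slash decorator with the routing pattern its docstring recommends.
+def _example_slash_routes():
+    class AboutHandler(RequestHandler):
+        @removeslash
+        def get(self):
+            # With the pattern r"/about/*", a GET of "/about/" redirects here.
+            self.write("about")
+
+    class DocsHandler(RequestHandler):
+        @addslash
+        def get(self):
+            # With the pattern r"/docs/?", a GET of "/docs" redirects to "/docs/".
+            self.write("docs")
+
+    return [(r"/about/*", AboutHandler), (r"/docs/?", DocsHandler)]
+
+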
    +class _ApplicationRouter(ReversibleRuleRouter):
    +    """Routing implementation used internally by `Application`.
    +
    +    Provides a binding between `Application` and `RequestHandler`.
    +    This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways:
+        * it allows `RequestHandler` subclasses to be used as a `~.routing.Rule` target, and
+        * it allows a list or tuple of rules to be used as a `~.routing.Rule` target;
+          the ``process_rule`` implementation substitutes such a list with an appropriate
+          `_ApplicationRouter` instance.
    +    """
    +
    +    def __init__(self, application, rules=None):
    +        assert isinstance(application, Application)
    +        self.application = application
    +        super(_ApplicationRouter, self).__init__(rules)
    +
    +    def process_rule(self, rule):
    +        rule = super(_ApplicationRouter, self).process_rule(rule)
    +
    +        if isinstance(rule.target, (list, tuple)):
    +            rule.target = _ApplicationRouter(self.application, rule.target)
    +
    +        return rule
    +
    +    def get_target_delegate(self, target, request, **target_params):
    +        if isclass(target) and issubclass(target, RequestHandler):
    +            return self.application.get_handler_delegate(request, target, **target_params)
    +
    +        return super(_ApplicationRouter, self).get_target_delegate(target, request, **target_params)
    +
    +
    +class Application(ReversibleRouter):
    +    """A collection of request handlers that make up a web application.
    +
    +    Instances of this class are callable and can be passed directly to
    +    HTTPServer to serve the application::
    +
    +        application = web.Application([
    +            (r"/", MainPageHandler),
    +        ])
    +        http_server = httpserver.HTTPServer(application)
    +        http_server.listen(8080)
    +        ioloop.IOLoop.current().start()
    +
    +    The constructor for this class takes in a list of `~.routing.Rule`
    +    objects or tuples of values corresponding to the arguments of
    +    `~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
    +    the values in square brackets being optional. The default matcher is
    +    `~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
    +    instead of ``(PathMatches(regexp), target)``.
    +
    +    A common routing target is a `RequestHandler` subclass, but you can also
    +    use lists of rules as a target, which create a nested routing configuration::
    +
    +        application = web.Application([
    +            (HostMatches("example.com"), [
    +                (r"/", MainPageHandler),
    +                (r"/feed", FeedHandler),
    +            ]),
    +        ])
    +
    +    In addition to this you can use nested `~.routing.Router` instances,
    +    `~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
    +    (see `~.routing` module docs for more information).
    +
    +    When we receive requests, we iterate over the list in order and
    +    instantiate an instance of the first request class whose regexp
    +    matches the request path. The request class can be specified as
    +    either a class object or a (fully-qualified) name.
    +
    +    A dictionary may be passed as the third element (``target_kwargs``)
    +    of the tuple, which will be used as keyword arguments to the handler's
    +    constructor and `~RequestHandler.initialize` method. This pattern
    +    is used for the `StaticFileHandler` in this example (note that a
    +    `StaticFileHandler` can be installed automatically with the
    +    static_path setting described below)::
    +
    +        application = web.Application([
    +            (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
    +        ])
    +
    +    We support virtual hosts with the `add_handlers` method, which takes in
    +    a host regular expression as the first argument::
    +
    +        application.add_handlers(r"www\.myhost\.com", [
    +            (r"/article/([0-9]+)", ArticleHandler),
    +        ])
    +
+    If there's no match for the current request's host, then the
+    ``default_host`` parameter value is matched against host regular
+    expressions.
    +
    +
    +    .. warning::
    +
+       Applications that do not use TLS may be vulnerable to :ref:`DNS
+       rebinding <dnsrebinding>` attacks. This attack is especially
+       relevant to applications that only listen on ``127.0.0.1`` or
    +       other private networks. Appropriate host patterns must be used
    +       (instead of the default of ``r'.*'``) to prevent this risk. The
    +       ``default_host`` argument must not be used in applications that
    +       may be vulnerable to DNS rebinding.
    +
    +    You can serve static files by sending the ``static_path`` setting
    +    as a keyword argument. We will serve those files from the
    +    ``/static/`` URI (this is configurable with the
    +    ``static_url_prefix`` setting), and we will serve ``/favicon.ico``
    +    and ``/robots.txt`` from the same directory.  A custom subclass of
    +    `StaticFileHandler` can be specified with the
    +    ``static_handler_class`` setting.
    +
    +    .. versionchanged:: 4.5
    +       Integration with the new `tornado.routing` module.
    +
    +    """
    +    def __init__(self, handlers=None, default_host=None, transforms=None,
    +                 **settings):
    +        if transforms is None:
    +            self.transforms = []
    +            if settings.get("compress_response") or settings.get("gzip"):
    +                self.transforms.append(GZipContentEncoding)
    +        else:
    +            self.transforms = transforms
    +        self.default_host = default_host
    +        self.settings = settings
    +        self.ui_modules = {'linkify': _linkify,
    +                           'xsrf_form_html': _xsrf_form_html,
    +                           'Template': TemplateModule,
    +                           }
    +        self.ui_methods = {}
    +        self._load_ui_modules(settings.get("ui_modules", {}))
    +        self._load_ui_methods(settings.get("ui_methods", {}))
    +        if self.settings.get("static_path"):
    +            path = self.settings["static_path"]
    +            handlers = list(handlers or [])
    +            static_url_prefix = settings.get("static_url_prefix",
    +                                             "/static/")
    +            static_handler_class = settings.get("static_handler_class",
    +                                                StaticFileHandler)
    +            static_handler_args = settings.get("static_handler_args", {})
    +            static_handler_args['path'] = path
    +            for pattern in [re.escape(static_url_prefix) + r"(.*)",
    +                            r"/(favicon\.ico)", r"/(robots\.txt)"]:
    +                handlers.insert(0, (pattern, static_handler_class,
    +                                    static_handler_args))
    +
    +        if self.settings.get('debug'):
    +            self.settings.setdefault('autoreload', True)
    +            self.settings.setdefault('compiled_template_cache', False)
    +            self.settings.setdefault('static_hash_cache', False)
    +            self.settings.setdefault('serve_traceback', True)
    +
    +        self.wildcard_router = _ApplicationRouter(self, handlers)
    +        self.default_router = _ApplicationRouter(self, [
    +            Rule(AnyMatches(), self.wildcard_router)
    +        ])
    +
    +        # Automatically reload modified modules
    +        if self.settings.get('autoreload'):
    +            from tornado import autoreload
    +            autoreload.start()
    +
    +    def listen(self, port, address="", **kwargs):
    +        """Starts an HTTP server for this application on the given port.
    +
    +        This is a convenience alias for creating an `.HTTPServer`
    +        object and calling its listen method.  Keyword arguments not
    +        supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
    +        `.HTTPServer` constructor.  For advanced uses
    +        (e.g. multi-process mode), do not use this method; create an
    +        `.HTTPServer` and call its
    +        `.TCPServer.bind`/`.TCPServer.start` methods directly.
    +
    +        Note that after calling this method you still need to call
    +        ``IOLoop.current().start()`` to start the server.
    +
    +        Returns the `.HTTPServer` object.
    +
    +        .. versionchanged:: 4.3
    +           Now returns the `.HTTPServer` object.
    +        """
    +        # import is here rather than top level because HTTPServer
    +        # is not importable on appengine
    +        from tornado.httpserver import HTTPServer
    +        server = HTTPServer(self, **kwargs)
    +        server.listen(port, address)
    +        return server
    +
    +    def add_handlers(self, host_pattern, host_handlers):
    +        """Appends the given handlers to our handler list.
    +
    +        Host patterns are processed sequentially in the order they were
    +        added. All matching patterns will be considered.
    +        """
    +        host_matcher = HostMatches(host_pattern)
    +        rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
    +
    +        self.default_router.rules.insert(-1, rule)
    +
    +        if self.default_host is not None:
    +            self.wildcard_router.add_rules([(
    +                DefaultHostMatches(self, host_matcher.host_pattern),
    +                host_handlers
    +            )])
    +
    +    def add_transform(self, transform_class):
    +        self.transforms.append(transform_class)
    +
    +    def _load_ui_methods(self, methods):
    +        if isinstance(methods, types.ModuleType):
    +            self._load_ui_methods(dict((n, getattr(methods, n))
    +                                       for n in dir(methods)))
    +        elif isinstance(methods, list):
    +            for m in methods:
    +                self._load_ui_methods(m)
    +        else:
    +            for name, fn in methods.items():
    +                if not name.startswith("_") and hasattr(fn, "__call__") \
    +                        and name[0].lower() == name[0]:
    +                    self.ui_methods[name] = fn
    +
    +    def _load_ui_modules(self, modules):
    +        if isinstance(modules, types.ModuleType):
    +            self._load_ui_modules(dict((n, getattr(modules, n))
    +                                       for n in dir(modules)))
    +        elif isinstance(modules, list):
    +            for m in modules:
    +                self._load_ui_modules(m)
    +        else:
    +            assert isinstance(modules, dict)
    +            for name, cls in modules.items():
    +                try:
    +                    if issubclass(cls, UIModule):
    +                        self.ui_modules[name] = cls
    +                except TypeError:
    +                    pass
    +
    +    def __call__(self, request):
    +        # Legacy HTTPServer interface
    +        dispatcher = self.find_handler(request)
    +        return dispatcher.execute()
    +
    +    def find_handler(self, request, **kwargs):
    +        route = self.default_router.find_handler(request)
    +        if route is not None:
    +            return route
    +
    +        if self.settings.get('default_handler_class'):
    +            return self.get_handler_delegate(
    +                request,
    +                self.settings['default_handler_class'],
    +                self.settings.get('default_handler_args', {}))
    +
    +        return self.get_handler_delegate(
    +            request, ErrorHandler, {'status_code': 404})
    +
    +    def get_handler_delegate(self, request, target_class, target_kwargs=None,
    +                             path_args=None, path_kwargs=None):
    +        """Returns `~.httputil.HTTPMessageDelegate` that can serve a request
    +        for application and `RequestHandler` subclass.
    +
    +        :arg httputil.HTTPServerRequest request: current HTTP request.
    +        :arg RequestHandler target_class: a `RequestHandler` class.
    +        :arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
    +        :arg list path_args: positional arguments for ``target_class`` HTTP method that
    +            will be executed while handling a request (``get``, ``post`` or any other).
    +        :arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
    +        """
    +        return _HandlerDelegate(
    +            self, request, target_class, target_kwargs, path_args, path_kwargs)
    +
    +    def reverse_url(self, name, *args):
    +        """Returns a URL path for handler named ``name``
    +
    +        The handler must be added to the application as a named `URLSpec`.
    +
    +        Args will be substituted for capturing groups in the `URLSpec` regex.
    +        They will be converted to strings if necessary, encoded as utf8,
    +        and url-escaped.
    +        """
    +        reversed_url = self.default_router.reverse_url(name, *args)
    +        if reversed_url is not None:
    +            return reversed_url
    +
    +        raise KeyError("%s not found in named urls" % name)
    +
    +    def log_request(self, handler):
    +        """Writes a completed HTTP request to the logs.
    +
    +        By default writes to the python root logger.  To change
    +        this behavior either subclass Application and override this method,
    +        or pass a function in the application settings dictionary as
    +        ``log_function``.
    +        """
    +        if "log_function" in self.settings:
    +            self.settings["log_function"](handler)
    +            return
    +        if handler.get_status() < 400:
    +            log_method = access_log.info
    +        elif handler.get_status() < 500:
    +            log_method = access_log.warning
    +        else:
    +            log_method = access_log.error
    +        request_time = 1000.0 * handler.request.request_time()
    +        log_method("%d %s %.2fms", handler.get_status(),
    +                   handler._request_summary(), request_time)
    +
    +
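+# Editor's note: a hedged sketch (not part of Tornado) tying together named
+# routes, ``reverse_url``, ``add_handlers`` and the ``log_function`` setting.
+# ``URLSpec`` is defined later in this module; all other names here are
+# illustrative.
+def _example_application():
+    class UserHandler(RequestHandler):
+        def get(self, user_id):
+            self.write("user %s" % user_id)
+
+    def quiet_log(handler):
+        # Installed via the log_function setting in place of log_request.
+        if handler.get_status() >= 500:
+            app_log.error("%s", handler._request_summary())
+
+    app = Application(
+        [URLSpec(r"/user/([0-9]+)", UserHandler, name="user")],
+        log_function=quiet_log)
+    app.add_handlers(r"admin\.example\.com", [(r"/([0-9]+)", UserHandler)])
+    assert app.reverse_url("user", 42) == "/user/42"
+    app.listen(8888)  # IOLoop.current().start() must still be called
+    return app
+
+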
    +class _HandlerDelegate(httputil.HTTPMessageDelegate):
    +    def __init__(self, application, request, handler_class, handler_kwargs,
    +                 path_args, path_kwargs):
    +        self.application = application
    +        self.connection = request.connection
    +        self.request = request
    +        self.handler_class = handler_class
    +        self.handler_kwargs = handler_kwargs or {}
    +        self.path_args = path_args or []
    +        self.path_kwargs = path_kwargs or {}
    +        self.chunks = []
    +        self.stream_request_body = _has_stream_request_body(self.handler_class)
    +
    +    def headers_received(self, start_line, headers):
    +        if self.stream_request_body:
    +            self.request.body = Future()
    +            return self.execute()
    +
    +    def data_received(self, data):
    +        if self.stream_request_body:
    +            return self.handler.data_received(data)
    +        else:
    +            self.chunks.append(data)
    +
    +    def finish(self):
    +        if self.stream_request_body:
    +            future_set_result_unless_cancelled(self.request.body, None)
    +        else:
    +            self.request.body = b''.join(self.chunks)
    +            self.request._parse_body()
    +            self.execute()
    +
    +    def on_connection_close(self):
    +        if self.stream_request_body:
    +            self.handler.on_connection_close()
    +        else:
    +            self.chunks = None
    +
    +    def execute(self):
    +        # If template cache is disabled (usually in the debug mode),
    +        # re-compile templates and reload static files on every
    +        # request so you don't need to restart to see changes
    +        if not self.application.settings.get("compiled_template_cache", True):
    +            with RequestHandler._template_loader_lock:
    +                for loader in RequestHandler._template_loaders.values():
    +                    loader.reset()
    +        if not self.application.settings.get('static_hash_cache', True):
    +            StaticFileHandler.reset()
    +
    +        self.handler = self.handler_class(self.application, self.request,
    +                                          **self.handler_kwargs)
    +        transforms = [t(self.request) for t in self.application.transforms]
    +
    +        if self.stream_request_body:
    +            self.handler._prepared_future = Future()
    +        # Note that if an exception escapes handler._execute it will be
    +        # trapped in the Future it returns (which we are ignoring here,
    +        # leaving it to be logged when the Future is GC'd).
    +        # However, that shouldn't happen because _execute has a blanket
    +        # except handler, and we cannot easily access the IOLoop here to
    +        # call add_future (because of the requirement to remain compatible
    +        # with WSGI)
    +        self.handler._execute(transforms, *self.path_args,
    +                              **self.path_kwargs)
    +        # If we are streaming the request body, then execute() is finished
    +        # when the handler has prepared to receive the body.  If not,
    +        # it doesn't matter when execute() finishes (so we return None)
    +        return self.handler._prepared_future
    +
    +
    +class HTTPError(Exception):
    +    """An exception that will turn into an HTTP error response.
    +
    +    Raising an `HTTPError` is a convenient alternative to calling
    +    `RequestHandler.send_error` since it automatically ends the
    +    current function.
    +
    +    To customize the response sent with an `HTTPError`, override
    +    `RequestHandler.write_error`.
    +
    +    :arg int status_code: HTTP status code.  Must be listed in
+        `httplib.responses <http.client.responses>` unless the ``reason``
    +        keyword argument is given.
    +    :arg str log_message: Message to be written to the log for this error
    +        (will not be shown to the user unless the `Application` is in debug
    +        mode).  May contain ``%s``-style placeholders, which will be filled
    +        in with remaining positional parameters.
    +    :arg str reason: Keyword-only argument.  The HTTP "reason" phrase
    +        to pass in the status line along with ``status_code``.  Normally
+        determined automatically from ``status_code``, but must be supplied
+        when using a non-standard numeric code.
    +    """
    +    def __init__(self, status_code=500, log_message=None, *args, **kwargs):
    +        self.status_code = status_code
    +        self.log_message = log_message
    +        self.args = args
    +        self.reason = kwargs.get('reason', None)
    +        if log_message and not args:
    +            self.log_message = log_message.replace('%', '%%')
    +
    +    def __str__(self):
    +        message = "HTTP %d: %s" % (
    +            self.status_code,
    +            self.reason or httputil.responses.get(self.status_code, 'Unknown'))
    +        if self.log_message:
    +            return message + " (" + (self.log_message % self.args) + ")"
    +        else:
    +            return message
    +
    +
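+# Editor's note: a hedged sketch of raising HTTPError with a log-only
+# message; the ``%s`` placeholder is filled from the extra positional
+# argument, and the text is not sent to the client outside debug mode.
+def _example_http_error(resource_id):
+    raise HTTPError(404, "resource %s not found", resource_id)
+
+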
    +class Finish(Exception):
    +    """An exception that ends the request without producing an error response.
    +
    +    When `Finish` is raised in a `RequestHandler`, the request will
    +    end (calling `RequestHandler.finish` if it hasn't already been
    +    called), but the error-handling methods (including
    +    `RequestHandler.write_error`) will not be called.
    +
    +    If `Finish()` was created with no arguments, the pending response
    +    will be sent as-is. If `Finish()` was given an argument, that
    +    argument will be passed to `RequestHandler.finish()`.
    +
    +    This can be a more convenient way to implement custom error pages
    +    than overriding ``write_error`` (especially in library code)::
    +
    +        if self.current_user is None:
    +            self.set_status(401)
    +            self.set_header('WWW-Authenticate', 'Basic realm="something"')
    +            raise Finish()
    +
    +    .. versionchanged:: 4.3
    +       Arguments passed to ``Finish()`` will be passed on to
    +       `RequestHandler.finish`.
    +    """
    +    pass
    +
    +
    +class MissingArgumentError(HTTPError):
    +    """Exception raised by `RequestHandler.get_argument`.
    +
    +    This is a subclass of `HTTPError`, so if it is uncaught a 400 response
    +    code will be used instead of 500 (and a stack trace will not be logged).
    +
    +    .. versionadded:: 3.1
    +    """
    +    def __init__(self, arg_name):
    +        super(MissingArgumentError, self).__init__(
    +            400, 'Missing argument %s' % arg_name)
    +        self.arg_name = arg_name
    +
    +
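+# Editor's note: hedged sketch: calling get_argument without a default
+# raises MissingArgumentError, which produces a 400 instead of a 500.
+def _example_required_argument(handler):
+    return handler.get_argument("user_id")  # may raise MissingArgumentError
+
+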
    +class ErrorHandler(RequestHandler):
    +    """Generates an error response with ``status_code`` for all requests."""
    +    def initialize(self, status_code):
    +        self.set_status(status_code)
    +
    +    def prepare(self):
    +        raise HTTPError(self._status_code)
    +
    +    def check_xsrf_cookie(self):
    +        # POSTs to an ErrorHandler don't actually have side effects,
    +        # so we don't need to check the xsrf token.  This allows POSTs
    +        # to the wrong url to return a 404 instead of 403.
    +        pass
    +
    +
    +class RedirectHandler(RequestHandler):
    +    """Redirects the client to the given URL for all GET requests.
    +
    +    You should provide the keyword argument ``url`` to the handler, e.g.::
    +
    +        application = web.Application([
    +            (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
    +        ])
    +
    +    `RedirectHandler` supports regular expression substitutions. E.g., to
    +    swap the first and second parts of a path while preserving the remainder::
    +
    +        application = web.Application([
    +            (r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
    +        ])
    +
    +    The final URL is formatted with `str.format` and the substrings that match
    +    the capturing groups. In the above example, a request to "/a/b/c" would be
    +    formatted like::
    +
    +        str.format("/{1}/{0}/{2}", "a", "b", "c")  # -> "/b/a/c"
    +
+    Use Python's :ref:`format string syntax <formatstrings>` to customize how
    +    values are substituted.
    +
    +    .. versionchanged:: 4.5
    +       Added support for substitutions into the destination URL.
    +
    +    .. versionchanged:: 5.0
    +       If any query arguments are present, they will be copied to the
    +       destination URL.
    +    """
    +    def initialize(self, url, permanent=True):
    +        self._url = url
    +        self._permanent = permanent
    +
    +    def get(self, *args):
    +        to_url = self._url.format(*args)
    +        if self.request.query_arguments:
    +            to_url = httputil.url_concat(
    +                to_url, list(httputil.qs_to_qsl(self.request.query_arguments)))
    +        self.redirect(to_url, permanent=self._permanent)
    +
    +
    +class StaticFileHandler(RequestHandler):
    +    """A simple handler that can serve static content from a directory.
    +
    +    A `StaticFileHandler` is configured automatically if you pass the
    +    ``static_path`` keyword argument to `Application`.  This handler
    +    can be customized with the ``static_url_prefix``, ``static_handler_class``,
    +    and ``static_handler_args`` settings.
    +
    +    To map an additional path to this handler for a static data directory
    +    you would add a line to your application like::
    +
    +        application = web.Application([
    +            (r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
    +        ])
    +
    +    The handler constructor requires a ``path`` argument, which specifies the
    +    local root directory of the content to be served.
    +
    +    Note that a capture group in the regex is required to parse the value for
    +    the ``path`` argument to the get() method (different than the constructor
    +    argument above); see `URLSpec` for details.
    +
    +    To serve a file like ``index.html`` automatically when a directory is
    +    requested, set ``static_handler_args=dict(default_filename="index.html")``
    +    in your application settings, or add ``default_filename`` as an initializer
    +    argument for your ``StaticFileHandler``.
    +
    +    To maximize the effectiveness of browser caching, this class supports
    +    versioned urls (by default using the argument ``?v=``).  If a version
    +    is given, we instruct the browser to cache this file indefinitely.
    +    `make_static_url` (also available as `RequestHandler.static_url`) can
    +    be used to construct a versioned url.
    +
    +    This handler is intended primarily for use in development and light-duty
    +    file serving; for heavy traffic it will be more efficient to use
    +    a dedicated static file server (such as nginx or Apache).  We support
    +    the HTTP ``Accept-Ranges`` mechanism to return partial content (because
    +    some browsers require this functionality to be present to seek in
    +    HTML5 audio or video).
    +
    +    **Subclassing notes**
    +
    +    This class is designed to be extensible by subclassing, but because
    +    of the way static urls are generated with class methods rather than
    +    instance methods, the inheritance patterns are somewhat unusual.
    +    Be sure to use the ``@classmethod`` decorator when overriding a
+    class method.  Instance methods may use the attributes ``self.path``,
    +    ``self.absolute_path``, and ``self.modified``.
    +
    +    Subclasses should only override methods discussed in this section;
    +    overriding other methods is error-prone.  Overriding
    +    ``StaticFileHandler.get`` is particularly problematic due to the
    +    tight coupling with ``compute_etag`` and other methods.
    +
    +    To change the way static urls are generated (e.g. to match the behavior
    +    of another server or CDN), override `make_static_url`, `parse_url_path`,
    +    `get_cache_time`, and/or `get_version`.
    +
    +    To replace all interaction with the filesystem (e.g. to serve
    +    static content from a database), override `get_content`,
    +    `get_content_size`, `get_modified_time`, `get_absolute_path`, and
    +    `validate_absolute_path`.
    +
    +    .. versionchanged:: 3.1
    +       Many of the methods for subclasses were added in Tornado 3.1.
    +    """
    +    CACHE_MAX_AGE = 86400 * 365 * 10  # 10 years
    +
    +    _static_hashes = {}  # type: typing.Dict
    +    _lock = threading.Lock()  # protects _static_hashes
    +
    +    def initialize(self, path, default_filename=None):
    +        self.root = path
    +        self.default_filename = default_filename
    +
    +    @classmethod
    +    def reset(cls):
    +        with cls._lock:
    +            cls._static_hashes = {}
    +
    +    def head(self, path):
    +        return self.get(path, include_body=False)
    +
    +    @gen.coroutine
    +    def get(self, path, include_body=True):
    +        # Set up our path instance variables.
    +        self.path = self.parse_url_path(path)
    +        del path  # make sure we don't refer to path instead of self.path again
    +        absolute_path = self.get_absolute_path(self.root, self.path)
    +        self.absolute_path = self.validate_absolute_path(
    +            self.root, absolute_path)
    +        if self.absolute_path is None:
    +            return
    +
    +        self.modified = self.get_modified_time()
    +        self.set_headers()
    +
    +        if self.should_return_304():
    +            self.set_status(304)
    +            return
    +
    +        request_range = None
    +        range_header = self.request.headers.get("Range")
    +        if range_header:
    +            # As per RFC 2616 14.16, if an invalid Range header is specified,
    +            # the request will be treated as if the header didn't exist.
    +            request_range = httputil._parse_request_range(range_header)
    +
    +        size = self.get_content_size()
    +        if request_range:
    +            start, end = request_range
    +            if (start is not None and start >= size) or end == 0:
+                # As per RFC 2616 14.35.1, a range is not satisfiable only if
+                # the first requested byte is equal to or greater than the
+                # content length, or when a suffix with length 0 is specified.
    +                self.set_status(416)  # Range Not Satisfiable
    +                self.set_header("Content-Type", "text/plain")
    +                self.set_header("Content-Range", "bytes */%s" % (size, ))
    +                return
    +            if start is not None and start < 0:
    +                start += size
    +            if end is not None and end > size:
    +                # Clients sometimes blindly use a large range to limit their
    +                # download size; cap the endpoint at the actual file size.
    +                end = size
    +            # Note: only return HTTP 206 if less than the entire range has been
    +            # requested. Not only is this semantically correct, but Chrome
    +            # refuses to play audio if it gets an HTTP 206 in response to
    +            # ``Range: bytes=0-``.
    +            if size != (end or size) - (start or 0):
    +                self.set_status(206)  # Partial Content
    +                self.set_header("Content-Range",
    +                                httputil._get_content_range(start, end, size))
    +        else:
    +            start = end = None
    +
    +        if start is not None and end is not None:
    +            content_length = end - start
    +        elif end is not None:
    +            content_length = end
    +        elif start is not None:
    +            content_length = size - start
    +        else:
    +            content_length = size
    +        self.set_header("Content-Length", content_length)
    +
    +        if include_body:
    +            content = self.get_content(self.absolute_path, start, end)
    +            if isinstance(content, bytes):
    +                content = [content]
    +            for chunk in content:
    +                try:
    +                    self.write(chunk)
    +                    yield self.flush()
    +                except iostream.StreamClosedError:
    +                    return
    +        else:
    +            assert self.request.method == "HEAD"
    +
    +    def compute_etag(self):
    +        """Sets the ``Etag`` header based on static url version.
    +
    +        This allows efficient ``If-None-Match`` checks against cached
    +        versions, and sends the correct ``Etag`` for a partial response
    +        (i.e. the same ``Etag`` as the full file).
    +
    +        .. versionadded:: 3.1
    +        """
    +        version_hash = self._get_cached_version(self.absolute_path)
    +        if not version_hash:
    +            return None
    +        return '"%s"' % (version_hash, )
    +
    +    def set_headers(self):
    +        """Sets the content and caching headers on the response.
    +
    +        .. versionadded:: 3.1
    +        """
    +        self.set_header("Accept-Ranges", "bytes")
    +        self.set_etag_header()
    +
    +        if self.modified is not None:
    +            self.set_header("Last-Modified", self.modified)
    +
    +        content_type = self.get_content_type()
    +        if content_type:
    +            self.set_header("Content-Type", content_type)
    +
    +        cache_time = self.get_cache_time(self.path, self.modified,
    +                                         content_type)
    +        if cache_time > 0:
    +            self.set_header("Expires", datetime.datetime.utcnow() +
    +                            datetime.timedelta(seconds=cache_time))
    +            self.set_header("Cache-Control", "max-age=" + str(cache_time))
    +
    +        self.set_extra_headers(self.path)
    +
    +    def should_return_304(self):
    +        """Returns True if the headers indicate that we should return 304.
    +
    +        .. versionadded:: 3.1
    +        """
    +        # If client sent If-None-Match, use it, ignore If-Modified-Since
    +        if self.request.headers.get('If-None-Match'):
    +            return self.check_etag_header()
    +
    +        # Check the If-Modified-Since, and don't send the result if the
    +        # content has not been modified
    +        ims_value = self.request.headers.get("If-Modified-Since")
    +        if ims_value is not None:
    +            date_tuple = email.utils.parsedate(ims_value)
    +            if date_tuple is not None:
    +                if_since = datetime.datetime(*date_tuple[:6])
    +                if if_since >= self.modified:
    +                    return True
    +
    +        return False
    +
    +    @classmethod
    +    def get_absolute_path(cls, root, path):
    +        """Returns the absolute location of ``path`` relative to ``root``.
    +
    +        ``root`` is the path configured for this `StaticFileHandler`
    +        (in most cases the ``static_path`` `Application` setting).
    +
    +        This class method may be overridden in subclasses.  By default
    +        it returns a filesystem path, but other strings may be used
    +        as long as they are unique and understood by the subclass's
    +        overridden `get_content`.
    +
    +        .. versionadded:: 3.1
    +        """
    +        abspath = os.path.abspath(os.path.join(root, path))
    +        return abspath
    +
    +    def validate_absolute_path(self, root, absolute_path):
    +        """Validate and return the absolute path.
    +
    +        ``root`` is the configured path for the `StaticFileHandler`,
+        and ``absolute_path`` is the result of `get_absolute_path`.
    +
    +        This is an instance method called during request processing,
    +        so it may raise `HTTPError` or use methods like
    +        `RequestHandler.redirect` (return None after redirecting to
    +        halt further processing).  This is where 404 errors for missing files
    +        are generated.
    +
    +        This method may modify the path before returning it, but note that
    +        any such modifications will not be understood by `make_static_url`.
    +
    +        In instance methods, this method's result is available as
    +        ``self.absolute_path``.
    +
    +        .. versionadded:: 3.1
    +        """
    +        # os.path.abspath strips a trailing /.
    +        # We must add it back to `root` so that we only match files
    +        # in a directory named `root` instead of files starting with
    +        # that prefix.
    +        root = os.path.abspath(root)
    +        if not root.endswith(os.path.sep):
    +            # abspath always removes a trailing slash, except when
    +            # root is '/'. This is an unusual case, but several projects
    +            # have independently discovered this technique to disable
    +            # Tornado's path validation and (hopefully) do their own,
    +            # so we need to support it.
    +            root += os.path.sep
+        # The trailing slash also needs to be temporarily added back
+        # to the requested path so a request to root/ will match.
    +        if not (absolute_path + os.path.sep).startswith(root):
    +            raise HTTPError(403, "%s is not in root static directory",
    +                            self.path)
    +        if (os.path.isdir(absolute_path) and
    +                self.default_filename is not None):
    +            # need to look at the request.path here for when path is empty
    +            # but there is some prefix to the path that was already
    +            # trimmed by the routing
    +            if not self.request.path.endswith("/"):
    +                self.redirect(self.request.path + "/", permanent=True)
    +                return
    +            absolute_path = os.path.join(absolute_path, self.default_filename)
    +        if not os.path.exists(absolute_path):
    +            raise HTTPError(404)
    +        if not os.path.isfile(absolute_path):
    +            raise HTTPError(403, "%s is not a file", self.path)
    +        return absolute_path
    +
    +    @classmethod
    +    def get_content(cls, abspath, start=None, end=None):
    +        """Retrieve the content of the requested resource which is located
    +        at the given absolute path.
    +
    +        This class method may be overridden by subclasses.  Note that its
    +        signature is different from other overridable class methods
    +        (no ``settings`` argument); this is deliberate to ensure that
    +        ``abspath`` is able to stand on its own as a cache key.
    +
    +        This method should either return a byte string or an iterator
    +        of byte strings.  The latter is preferred for large files
    +        as it helps reduce memory fragmentation.
    +
    +        .. versionadded:: 3.1
    +        """
    +        with open(abspath, "rb") as file:
    +            if start is not None:
    +                file.seek(start)
    +            if end is not None:
    +                remaining = end - (start or 0)
    +            else:
    +                remaining = None
    +            while True:
    +                chunk_size = 64 * 1024
    +                if remaining is not None and remaining < chunk_size:
    +                    chunk_size = remaining
    +                chunk = file.read(chunk_size)
    +                if chunk:
    +                    if remaining is not None:
    +                        remaining -= len(chunk)
    +                    yield chunk
    +                else:
    +                    if remaining is not None:
    +                        assert remaining == 0
    +                    return
    +
    +    @classmethod
    +    def get_content_version(cls, abspath):
    +        """Returns a version string for the resource at the given path.
    +
    +        This class method may be overridden by subclasses.  The
    +        default implementation is a hash of the file's contents.
    +
    +        .. versionadded:: 3.1
    +        """
    +        data = cls.get_content(abspath)
    +        hasher = hashlib.md5()
    +        if isinstance(data, bytes):
    +            hasher.update(data)
    +        else:
    +            for chunk in data:
    +                hasher.update(chunk)
    +        return hasher.hexdigest()
    +
    +    def _stat(self):
    +        if not hasattr(self, '_stat_result'):
    +            self._stat_result = os.stat(self.absolute_path)
    +        return self._stat_result
    +
    +    def get_content_size(self):
    +        """Retrieve the total size of the resource at the given path.
    +
    +        This method may be overridden by subclasses.
    +
    +        .. versionadded:: 3.1
    +
    +        .. versionchanged:: 4.0
    +           This method is now always called, instead of only when
    +           partial results are requested.
    +        """
    +        stat_result = self._stat()
    +        return stat_result[stat.ST_SIZE]
    +
    +    def get_modified_time(self):
    +        """Returns the time that ``self.absolute_path`` was last modified.
    +
    +        May be overridden in subclasses.  Should return a `~datetime.datetime`
    +        object or None.
    +
    +        .. versionadded:: 3.1
    +        """
    +        stat_result = self._stat()
    +        modified = datetime.datetime.utcfromtimestamp(
    +            stat_result[stat.ST_MTIME])
    +        return modified
    +
    +    def get_content_type(self):
    +        """Returns the ``Content-Type`` header to be used for this request.
    +
    +        .. versionadded:: 3.1
    +        """
    +        mime_type, encoding = mimetypes.guess_type(self.absolute_path)
    +        # per RFC 6713, use the appropriate type for a gzip compressed file
    +        if encoding == "gzip":
    +            return "application/gzip"
    +        # As of 2015-07-21 there is no bzip2 encoding defined at
    +        # http://www.iana.org/assignments/media-types/media-types.xhtml
    +        # So for that (and any other encoding), use octet-stream.
    +        elif encoding is not None:
    +            return "application/octet-stream"
    +        elif mime_type is not None:
    +            return mime_type
    +        # if mime_type not detected, use application/octet-stream
    +        else:
    +            return "application/octet-stream"
    +
    +    def set_extra_headers(self, path):
    +        """For subclass to add extra headers to the response"""
    +        pass
    +
    +    def get_cache_time(self, path, modified, mime_type):
    +        """Override to customize cache control behavior.
    +
    +        Return a positive number of seconds to make the result
+        cacheable for that amount of time, or 0 to mark the resource as
    +        cacheable for an unspecified amount of time (subject to
    +        browser heuristics).
    +
    +        By default returns cache expiry of 10 years for resources requested
    +        with ``v`` argument.
    +        """
    +        return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
    +
    +    @classmethod
    +    def make_static_url(cls, settings, path, include_version=True):
    +        """Constructs a versioned url for the given path.
    +
    +        This method may be overridden in subclasses (but note that it
    +        is a class method rather than an instance method).  Subclasses
    +        are only required to implement the signature
    +        ``make_static_url(cls, settings, path)``; other keyword
    +        arguments may be passed through `~RequestHandler.static_url`
    +        but are not standard.
    +
    +        ``settings`` is the `Application.settings` dictionary.  ``path``
    +        is the static path being requested.  The url returned should be
    +        relative to the current host.
    +
    +        ``include_version`` determines whether the generated URL should
    +        include the query string containing the version hash of the
    +        file corresponding to the given ``path``.
    +
    +        """
    +        url = settings.get('static_url_prefix', '/static/') + path
    +        if not include_version:
    +            return url
    +
    +        version_hash = cls.get_version(settings, path)
    +        if not version_hash:
    +            return url
    +
    +        return '%s?v=%s' % (url, version_hash)
    +
    +    def parse_url_path(self, url_path):
    +        """Converts a static URL path into a filesystem path.
    +
    +        ``url_path`` is the path component of the URL with
+        ``static_url_prefix`` removed.  The return value should be a
+        filesystem path relative to ``static_path``.
    +
    +        This is the inverse of `make_static_url`.
    +        """
    +        if os.path.sep != "/":
    +            url_path = url_path.replace("/", os.path.sep)
    +        return url_path
    +
    +    @classmethod
    +    def get_version(cls, settings, path):
    +        """Generate the version string to be used in static URLs.
    +
    +        ``settings`` is the `Application.settings` dictionary and ``path``
    +        is the relative location of the requested asset on the filesystem.
    +        The returned value should be a string, or ``None`` if no version
    +        could be determined.
    +
    +        .. versionchanged:: 3.1
    +           This method was previously recommended for subclasses to override;
    +           `get_content_version` is now preferred as it allows the base
    +           class to handle caching of the result.
    +        """
    +        abs_path = cls.get_absolute_path(settings['static_path'], path)
    +        return cls._get_cached_version(abs_path)
    +
    +    @classmethod
    +    def _get_cached_version(cls, abs_path):
    +        with cls._lock:
    +            hashes = cls._static_hashes
    +            if abs_path not in hashes:
    +                try:
    +                    hashes[abs_path] = cls.get_content_version(abs_path)
    +                except Exception:
    +                    gen_log.error("Could not open static file %r", abs_path)
    +                    hashes[abs_path] = None
    +            hsh = hashes.get(abs_path)
    +            if hsh:
    +                return hsh
    +        return None
    +
    +
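+# Editor's note: a hedged subclassing sketch (not part of Tornado) following
+# the notes above: only documented hooks are overridden, class-method hooks
+# keep @classmethod, and the CDN host is hypothetical.  Enable it with the
+# ``static_handler_class`` application setting.
+class _ExampleCDNStaticFileHandler(StaticFileHandler):
+    def set_extra_headers(self, path):
+        # Attach an extra header to every static response.
+        self.set_header("X-Static-Example", "1")
+
+    def get_cache_time(self, path, modified, mime_type):
+        # Cache versioned URLs for one day instead of ten years.
+        return 86400 if "v" in self.request.arguments else 0
+
+    @classmethod
+    def make_static_url(cls, settings, path, include_version=True):
+        # Prefix generated URLs with a (hypothetical) CDN host.
+        url = super(_ExampleCDNStaticFileHandler, cls).make_static_url(
+            settings, path, include_version)
+        return "https://cdn.example.com" + url
+
+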
    +class FallbackHandler(RequestHandler):
    +    """A `RequestHandler` that wraps another HTTP server callback.
    +
    +    The fallback is a callable object that accepts an
    +    `~.httputil.HTTPServerRequest`, such as an `Application` or
    +    `tornado.wsgi.WSGIContainer`.  This is most useful to use both
    +    Tornado ``RequestHandlers`` and WSGI in the same server.  Typical
    +    usage::
    +
    +        wsgi_app = tornado.wsgi.WSGIContainer(
    +            django.core.handlers.wsgi.WSGIHandler())
    +        application = tornado.web.Application([
    +            (r"/foo", FooHandler),
    +            (r".*", FallbackHandler, dict(fallback=wsgi_app),
    +        ])
    +    """
    +    def initialize(self, fallback):
    +        self.fallback = fallback
    +
    +    def prepare(self):
    +        self.fallback(self.request)
    +        self._finished = True
    +        self.on_finish()
    +
    +
    +class OutputTransform(object):
    +    """A transform modifies the result of an HTTP request (e.g., GZip encoding)
    +
    +    Applications are not expected to create their own OutputTransforms
    +    or interact with them directly; the framework chooses which transforms
    +    (if any) to apply.
    +    """
    +    def __init__(self, request):
    +        pass
    +
    +    def transform_first_chunk(self, status_code, headers, chunk, finishing):
    +        # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501
    +        return status_code, headers, chunk
    +
    +    def transform_chunk(self, chunk, finishing):
    +        return chunk
    +
    +
    +class GZipContentEncoding(OutputTransform):
    +    """Applies the gzip content encoding to the response.
    +
    +    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
    +
    +    .. versionchanged:: 4.0
+        Now compresses all mime types beginning with ``text/``, instead
+        of just a whitelist (the whitelist is still used for certain
+        non-text mime types).
    +    """
    +    # Whitelist of compressible mime types (in addition to any types
    +    # beginning with "text/").
    +    CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
    +                         "application/xml", "application/atom+xml",
    +                         "application/json", "application/xhtml+xml",
    +                         "image/svg+xml"])
    +    # Python's GzipFile defaults to level 9, while most other gzip
    +    # tools (including gzip itself) default to 6, which is probably a
    +    # better CPU/size tradeoff.
    +    GZIP_LEVEL = 6
    +    # Responses that are too short are unlikely to benefit from gzipping
    +    # after considering the "Content-Encoding: gzip" header and the header
    +    # inside the gzip encoding.
    +    # Note that responses written in multiple chunks will be compressed
    +    # regardless of size.
    +    MIN_LENGTH = 1024
    +
    +    def __init__(self, request):
    +        self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
    +
    +    def _compressible_type(self, ctype):
    +        return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
    +
    +    def transform_first_chunk(self, status_code, headers, chunk, finishing):
    +        # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501
    +        # TODO: can/should this type be inherited from the superclass?
    +        if 'Vary' in headers:
    +            headers['Vary'] += ', Accept-Encoding'
    +        else:
    +            headers['Vary'] = 'Accept-Encoding'
    +        if self._gzipping:
    +            ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
    +            self._gzipping = self._compressible_type(ctype) and \
    +                (not finishing or len(chunk) >= self.MIN_LENGTH) and \
    +                ("Content-Encoding" not in headers)
    +        if self._gzipping:
    +            headers["Content-Encoding"] = "gzip"
    +            self._gzip_value = BytesIO()
    +            self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
    +                                            compresslevel=self.GZIP_LEVEL)
    +            chunk = self.transform_chunk(chunk, finishing)
    +            if "Content-Length" in headers:
    +                # The original content length is no longer correct.
    +                # If this is the last (and only) chunk, we can set the new
    +                # content-length; otherwise we remove it and fall back to
    +                # chunked encoding.
    +                if finishing:
    +                    headers["Content-Length"] = str(len(chunk))
    +                else:
    +                    del headers["Content-Length"]
    +        return status_code, headers, chunk
    +
    +    def transform_chunk(self, chunk, finishing):
    +        if self._gzipping:
    +            self._gzip_file.write(chunk)
    +            if finishing:
    +                self._gzip_file.close()
    +            else:
    +                self._gzip_file.flush()
    +            chunk = self._gzip_value.getvalue()
    +            self._gzip_value.truncate(0)
    +            self._gzip_value.seek(0)
    +        return chunk
    +
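+# Editor's note: applications do not construct GZipContentEncoding
+# themselves; a minimal sketch of enabling it is the ``compress_response``
+# application setting:
+#
+#     app = Application(handlers, compress_response=True)
+#
+# With it enabled, responses whose Content-Type passes _compressible_type
+# and that reach MIN_LENGTH bytes are gzipped for clients that send
+# ``Accept-Encoding: gzip``.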
    +
    +def authenticated(method):
    +    """Decorate methods with this to require that the user be logged in.
    +
    +    If the user is not logged in, they will be redirected to the configured
+    `login url <RequestHandler.get_login_url>`.
    +
    +    If you configure a login url with a query parameter, Tornado will
    +    assume you know what you're doing and use it as-is.  If not, it
    +    will add a `next` parameter so the login page knows where to send
    +    you once you're logged in.
    +    """
    +    @functools.wraps(method)
    +    def wrapper(self, *args, **kwargs):
    +        if not self.current_user:
    +            if self.request.method in ("GET", "HEAD"):
    +                url = self.get_login_url()
    +                if "?" not in url:
    +                    if urlparse.urlsplit(url).scheme:
    +                        # if login url is absolute, make next absolute too
    +                        next_url = self.request.full_url()
    +                    else:
    +                        next_url = self.request.uri
    +                    url += "?" + urlencode(dict(next=next_url))
    +                self.redirect(url)
    +                return
    +            raise HTTPError(403)
    +        return method(self, *args, **kwargs)
    +    return wrapper
    +
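+# Editor's sketch of typical ``authenticated`` usage (handler name
+# hypothetical), together with the ``login_url`` setting that
+# ``get_login_url`` consults:
+#
+#     class ProfileHandler(RequestHandler):
+#         @authenticated
+#         def get(self):
+#             name = escape.xhtml_escape(self.current_user)
+#             self.write("Hello, " + name)
+#
+#     app = Application([(r"/profile", ProfileHandler)],
+#                       login_url="/login")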
    +
    +class UIModule(object):
    +    """A re-usable, modular UI unit on a page.
    +
    +    UI modules often execute additional queries, and they can include
    +    additional CSS and JavaScript that will be included in the output
    +    page, which is automatically inserted on page render.
    +
    +    Subclasses of UIModule must override the `render` method.
    +    """
    +    def __init__(self, handler):
    +        self.handler = handler
    +        self.request = handler.request
    +        self.ui = handler.ui
    +        self.locale = handler.locale
    +
    +    @property
    +    def current_user(self):
    +        return self.handler.current_user
    +
    +    def render(self, *args, **kwargs):
    +        """Override in subclasses to return this module's output."""
    +        raise NotImplementedError()
    +
    +    def embedded_javascript(self):
    +        """Override to return a JavaScript string
    +        to be embedded in the page."""
    +        return None
    +
    +    def javascript_files(self):
    +        """Override to return a list of JavaScript files needed by this module.
    +
    +        If the return values are relative paths, they will be passed to
    +        `RequestHandler.static_url`; otherwise they will be used as-is.
    +        """
    +        return None
    +
    +    def embedded_css(self):
    +        """Override to return a CSS string
    +        that will be embedded in the page."""
    +        return None
    +
    +    def css_files(self):
    +        """Override to returns a list of CSS files required by this module.
    +
    +        If the return values are relative paths, they will be passed to
    +        `RequestHandler.static_url`; otherwise they will be used as-is.
    +        """
    +        return None
    +
    +    def html_head(self):
    +        """Override to return an HTML string that will be put in the 
    +        element.
    +        """
    +        return None
    +
    +    def html_body(self):
    +        """Override to return an HTML string that will be put at the end of
+        the <body/> element.
    +        """
    +        return None
    +
    +    def render_string(self, path, **kwargs):
    +        """Renders a template and returns it as a string."""
    +        return self.handler.render_string(path, **kwargs)
    +
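+# Editor's sketch of a custom UIModule (names hypothetical): a module
+# invoked from a template as ``{% module Entry(entry) %}``:
+#
+#     class Entry(UIModule):
+#         def render(self, entry):
+#             return self.render_string("modules/entry.html", entry=entry)
+#
+# registered through the ``ui_modules`` application setting:
+#
+#     app = Application(handlers, ui_modules={"Entry": Entry})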
    +
    +class _linkify(UIModule):
    +    def render(self, text, **kwargs):
    +        return escape.linkify(text, **kwargs)
    +
    +
    +class _xsrf_form_html(UIModule):
    +    def render(self):
    +        return self.handler.xsrf_form_html()
    +
    +
    +class TemplateModule(UIModule):
    +    """UIModule that simply renders the given template.
    +
    +    {% module Template("foo.html") %} is similar to {% include "foo.html" %},
    +    but the module version gets its own namespace (with kwargs passed to
    +    Template()) instead of inheriting the outer template's namespace.
    +
    +    Templates rendered through this module also get access to UIModule's
    +    automatic javascript/css features.  Simply call set_resources
    +    inside the template and give it keyword arguments corresponding to
    +    the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
    +    Note that these resources are output once per template file, not once
    +    per instantiation of the template, so they must not depend on
    +    any arguments to the template.
    +    """
    +    def __init__(self, handler):
    +        super(TemplateModule, self).__init__(handler)
    +        # keep resources in both a list and a dict to preserve order
    +        self._resource_list = []
    +        self._resource_dict = {}
    +
    +    def render(self, path, **kwargs):
    +        def set_resources(**kwargs):
    +            if path not in self._resource_dict:
    +                self._resource_list.append(kwargs)
    +                self._resource_dict[path] = kwargs
    +            else:
    +                if self._resource_dict[path] != kwargs:
    +                    raise ValueError("set_resources called with different "
    +                                     "resources for the same template")
    +            return ""
    +        return self.render_string(path, set_resources=set_resources,
    +                                  **kwargs)
    +
    +    def _get_resources(self, key):
    +        return (r[key] for r in self._resource_list if key in r)
    +
    +    def embedded_javascript(self):
    +        return "\n".join(self._get_resources("embedded_javascript"))
    +
    +    def javascript_files(self):
    +        result = []
    +        for f in self._get_resources("javascript_files"):
    +            if isinstance(f, (unicode_type, bytes)):
    +                result.append(f)
    +            else:
    +                result.extend(f)
    +        return result
    +
    +    def embedded_css(self):
    +        return "\n".join(self._get_resources("embedded_css"))
    +
    +    def css_files(self):
    +        result = []
    +        for f in self._get_resources("css_files"):
    +            if isinstance(f, (unicode_type, bytes)):
    +                result.append(f)
    +            else:
    +                result.extend(f)
    +        return result
    +
    +    def html_head(self):
    +        return "".join(self._get_resources("html_head"))
    +
    +    def html_body(self):
    +        return "".join(self._get_resources("html_body"))
    +
    +
    +class _UIModuleNamespace(object):
    +    """Lazy namespace which creates UIModule proxies bound to a handler."""
    +    def __init__(self, handler, ui_modules):
    +        self.handler = handler
    +        self.ui_modules = ui_modules
    +
    +    def __getitem__(self, key):
    +        return self.handler._ui_module(key, self.ui_modules[key])
    +
    +    def __getattr__(self, key):
    +        try:
    +            return self[key]
    +        except KeyError as e:
    +            raise AttributeError(str(e))
    +
    +
    +if hasattr(hmac, 'compare_digest'):  # python 3.3
    +    _time_independent_equals = hmac.compare_digest
    +else:
    +    def _time_independent_equals(a, b):
    +        if len(a) != len(b):
    +            return False
    +        result = 0
    +        if isinstance(a[0], int):  # python3 byte strings
    +            for x, y in zip(a, b):
    +                result |= x ^ y
    +        else:  # python2
    +            for x, y in zip(a, b):
    +                result |= ord(x) ^ ord(y)
    +        return result == 0
    +
    +
    +def create_signed_value(secret, name, value, version=None, clock=None,
    +                        key_version=None):
    +    if version is None:
    +        version = DEFAULT_SIGNED_VALUE_VERSION
    +    if clock is None:
    +        clock = time.time
    +
    +    timestamp = utf8(str(int(clock())))
    +    value = base64.b64encode(utf8(value))
    +    if version == 1:
    +        signature = _create_signature_v1(secret, name, value, timestamp)
    +        value = b"|".join([value, timestamp, signature])
    +        return value
    +    elif version == 2:
    +        # The v2 format consists of a version number and a series of
    +        # length-prefixed fields "%d:%s", the last of which is a
    +        # signature, all separated by pipes.  All numbers are in
    +        # decimal format with no leading zeros.  The signature is an
    +        # HMAC-SHA256 of the whole string up to that point, including
    +        # the final pipe.
    +        #
    +        # The fields are:
    +        # - format version (i.e. 2; no length prefix)
    +        # - key version (integer, default is 0)
    +        # - timestamp (integer seconds since epoch)
    +        # - name (not encoded; assumed to be ~alphanumeric)
    +        # - value (base64-encoded)
    +        # - signature (hex-encoded; no length prefix)
    +        def format_field(s):
    +            return utf8("%d:" % len(s)) + utf8(s)
    +        to_sign = b"|".join([
    +            b"2",
    +            format_field(str(key_version or 0)),
    +            format_field(timestamp),
    +            format_field(name),
    +            format_field(value),
    +            b''])
    +
    +        if isinstance(secret, dict):
    +            assert key_version is not None, 'Key version must be set when sign key dict is used'
    +            assert version >= 2, 'Version must be at least 2 for key version support'
    +            secret = secret[key_version]
    +
    +        signature = _create_signature_v2(secret, to_sign)
    +        return to_sign + signature
    +    else:
    +        raise ValueError("Unsupported version %d" % version)
    +
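+# Editor's note, derived from the v2 branch above: signing value "bar" under
+# name "foo" at timestamp 1500000000 with key version 0 yields a byte string
+# of the shape
+#
+#     2|1:0|10:1500000000|3:foo|4:YmFy|<64 hex digits>
+#
+# where ``YmFy`` is base64("bar") and the trailing hex field is the
+# HMAC-SHA256 of everything up to and including the final pipe.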
    +
    +# A leading version number in decimal
    +# with no leading zeros, followed by a pipe.
    +_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
    +
    +
    +def _get_version(value):
    +    # Figures out what version value is.  Version 1 did not include an
    +    # explicit version field and started with arbitrary base64 data,
    +    # which makes this tricky.
    +    m = _signed_value_version_re.match(value)
    +    if m is None:
    +        version = 1
    +    else:
    +        try:
    +            version = int(m.group(1))
    +            if version > 999:
    +                # Certain payloads from the version-less v1 format may
    +                # be parsed as valid integers.  Due to base64 padding
    +                # restrictions, this can only happen for numbers whose
    +                # length is a multiple of 4, so we can treat all
    +                # numbers up to 999 as versions, and for the rest we
    +                # fall back to v1 format.
    +                version = 1
    +        except ValueError:
    +            version = 1
    +    return version
    +
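+# Editor's note with illustrative inputs: ``_get_version(b"2|1:0|...")``
+# returns 2, while a version-less v1 value such as
+# ``b"aGVsbG8=|1300000000|<sig>"`` starts with a letter, fails the regex,
+# and falls back to 1.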
    +
    +def decode_signed_value(secret, name, value, max_age_days=31,
    +                        clock=None, min_version=None):
    +    if clock is None:
    +        clock = time.time
    +    if min_version is None:
    +        min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
    +    if min_version > 2:
    +        raise ValueError("Unsupported min_version %d" % min_version)
    +    if not value:
    +        return None
    +
    +    value = utf8(value)
    +    version = _get_version(value)
    +
    +    if version < min_version:
    +        return None
    +    if version == 1:
    +        return _decode_signed_value_v1(secret, name, value,
    +                                       max_age_days, clock)
    +    elif version == 2:
    +        return _decode_signed_value_v2(secret, name, value,
    +                                       max_age_days, clock)
    +    else:
    +        return None
    +
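+# Editor's sketch of the round trip through the two functions above:
+#
+#     signed = create_signed_value("my-secret", "session", "hello")
+#     assert decode_signed_value("my-secret", "session", signed) == b"hello"
+#
+# ``decode_signed_value`` returns the payload as bytes, or None when the
+# signature, name, version, or age check fails.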
    +
    +def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
    +    parts = utf8(value).split(b"|")
    +    if len(parts) != 3:
    +        return None
    +    signature = _create_signature_v1(secret, name, parts[0], parts[1])
    +    if not _time_independent_equals(parts[2], signature):
    +        gen_log.warning("Invalid cookie signature %r", value)
    +        return None
    +    timestamp = int(parts[1])
    +    if timestamp < clock() - max_age_days * 86400:
    +        gen_log.warning("Expired cookie %r", value)
    +        return None
    +    if timestamp > clock() + 31 * 86400:
    +        # _cookie_signature does not hash a delimiter between the
    +        # parts of the cookie, so an attacker could transfer trailing
    +        # digits from the payload to the timestamp without altering the
    +        # signature.  For backwards compatibility, sanity-check timestamp
    +        # here instead of modifying _cookie_signature.
    +        gen_log.warning("Cookie timestamp in future; possible tampering %r",
    +                        value)
    +        return None
    +    if parts[1].startswith(b"0"):
    +        gen_log.warning("Tampered cookie %r", value)
    +        return None
    +    try:
    +        return base64.b64decode(parts[0])
    +    except Exception:
    +        return None
    +
    +
    +def _decode_fields_v2(value):
    +    def _consume_field(s):
    +        length, _, rest = s.partition(b':')
    +        n = int(length)
    +        field_value = rest[:n]
    +        # In python 3, indexing bytes returns small integers; we must
    +        # use a slice to get a byte string as in python 2.
    +        if rest[n:n + 1] != b'|':
    +            raise ValueError("malformed v2 signed value field")
    +        rest = rest[n + 1:]
    +        return field_value, rest
    +
    +    rest = value[2:]  # remove version number
    +    key_version, rest = _consume_field(rest)
    +    timestamp, rest = _consume_field(rest)
    +    name_field, rest = _consume_field(rest)
    +    value_field, passed_sig = _consume_field(rest)
    +    return int(key_version), timestamp, name_field, value_field, passed_sig
    +
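+# Editor's note: each v2 field is length-prefixed and pipe-terminated, so
+# the inner ``_consume_field(b"3:foo|4:YmFy|...")`` above returns
+# ``(b"foo", b"4:YmFy|...")``.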
    +
    +def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
    +    try:
    +        key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
    +    except ValueError:
    +        return None
    +    signed_string = value[:-len(passed_sig)]
    +
    +    if isinstance(secret, dict):
    +        try:
    +            secret = secret[key_version]
    +        except KeyError:
    +            return None
    +
    +    expected_sig = _create_signature_v2(secret, signed_string)
    +    if not _time_independent_equals(passed_sig, expected_sig):
    +        return None
    +    if name_field != utf8(name):
    +        return None
    +    timestamp = int(timestamp)
    +    if timestamp < clock() - max_age_days * 86400:
    +        # The signature has expired.
    +        return None
    +    try:
    +        return base64.b64decode(value_field)
    +    except Exception:
    +        return None
    +
    +
    +def get_signature_key_version(value):
    +    value = utf8(value)
    +    version = _get_version(value)
    +    if version < 2:
    +        return None
    +    try:
    +        key_version, _, _, _, _ = _decode_fields_v2(value)
    +    except ValueError:
    +        return None
    +
    +    return key_version
    +
    +
    +def _create_signature_v1(secret, *parts):
    +    hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
    +    for part in parts:
    +        hash.update(utf8(part))
    +    return utf8(hash.hexdigest())
    +
    +
    +def _create_signature_v2(secret, s):
    +    hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
    +    hash.update(utf8(s))
    +    return utf8(hash.hexdigest())
    +
    +
    +def is_absolute(path):
    +    return any(path.startswith(x) for x in ["/", "http:", "https:"])
    diff --git a/server/www/packages/packages-windows/x86/tornado/websocket.py b/server/www/packages/packages-windows/x86/tornado/websocket.py
    new file mode 100644
    index 0000000..0b994fc
    --- /dev/null
    +++ b/server/www/packages/packages-windows/x86/tornado/websocket.py
    @@ -0,0 +1,1342 @@
    +"""Implementation of the WebSocket protocol.
    +
+`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
    +communication between the browser and server.
    +
    +WebSockets are supported in the current versions of all major browsers,
    +although older versions that do not support WebSockets are still in use
    +(refer to http://caniuse.com/websockets for details).
    +
    +This module implements the final version of the WebSocket protocol as
+defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_.  Certain
    +browser versions (notably Safari 5.x) implemented an earlier draft of
    +the protocol (known as "draft 76") and are not compatible with this module.
    +
    +.. versionchanged:: 4.0
    +   Removed support for the draft 76 protocol version.
    +"""
    +
    +from __future__ import absolute_import, division, print_function
    +
    +import base64
    +import hashlib
    +import os
    +import sys
    +import struct
    +import tornado.escape
    +import tornado.web
    +import zlib
    +
    +from tornado.concurrent import Future, future_set_result_unless_cancelled
    +from tornado.escape import utf8, native_str, to_unicode
    +from tornado import gen, httpclient, httputil
    +from tornado.ioloop import IOLoop, PeriodicCallback
    +from tornado.iostream import StreamClosedError
    +from tornado.log import gen_log
    +from tornado import simple_httpclient
    +from tornado.queues import Queue
    +from tornado.tcpclient import TCPClient
    +from tornado.util import _websocket_mask, PY3
    +
+if PY3:
+    from urllib.parse import urlparse  # py3
+    xrange = range
+else:
+    from urlparse import urlparse  # py2
    +
    +_default_max_message_size = 10 * 1024 * 1024
    +
    +
    +class WebSocketError(Exception):
    +    pass
    +
    +
    +class WebSocketClosedError(WebSocketError):
    +    """Raised by operations on a closed connection.
    +
    +    .. versionadded:: 3.2
    +    """
    +    pass
    +
    +
    +class _DecompressTooLargeError(Exception):
    +    pass
    +
    +
    +class WebSocketHandler(tornado.web.RequestHandler):
    +    """Subclass this class to create a basic WebSocket handler.
    +
    +    Override `on_message` to handle incoming messages, and use
    +    `write_message` to send messages to the client. You can also
    +    override `open` and `on_close` to handle opened and closed
    +    connections.
    +
    +    Custom upgrade response headers can be sent by overriding
    +    `~tornado.web.RequestHandler.set_default_headers` or
    +    `~tornado.web.RequestHandler.prepare`.
    +
    +    See http://dev.w3.org/html5/websockets/ for details on the
    +    JavaScript interface.  The protocol is specified at
    +    http://tools.ietf.org/html/rfc6455.
    +
+    Here is an example WebSocket handler that echoes all received messages
+    back to the client:
    +
    +    .. testcode::
    +
    +      class EchoWebSocket(tornado.websocket.WebSocketHandler):
    +          def open(self):
    +              print("WebSocket opened")
    +
    +          def on_message(self, message):
    +              self.write_message(u"You said: " + message)
    +
    +          def on_close(self):
    +              print("WebSocket closed")
    +
    +    .. testoutput::
    +       :hide:
    +
    +    WebSockets are not standard HTTP connections. The "handshake" is
    +    HTTP, but after the handshake, the protocol is
    +    message-based. Consequently, most of the Tornado HTTP facilities
    +    are not available in handlers of this type. The only communication
    +    methods available to you are `write_message()`, `ping()`, and
    +    `close()`. Likewise, your request handler class should implement
+    the `open()` method rather than ``get()`` or ``post()``.
    +
    +    If you map the handler above to ``/websocket`` in your application, you can
    +    invoke it in JavaScript with::
    +
    +      var ws = new WebSocket("ws://localhost:8888/websocket");
    +      ws.onopen = function() {
    +         ws.send("Hello, world");
    +      };
    +      ws.onmessage = function (evt) {
    +         alert(evt.data);
    +      };
    +
    +    This script pops up an alert box that says "You said: Hello, world".
    +
    +    Web browsers allow any site to open a websocket connection to any other,
    +    instead of using the same-origin policy that governs other network
    +    access from javascript.  This can be surprising and is a potential
    +    security hole, so since Tornado 4.0 `WebSocketHandler` requires
    +    applications that wish to receive cross-origin websockets to opt in
    +    by overriding the `~WebSocketHandler.check_origin` method (see that
    +    method's docs for details).  Failure to do so is the most likely
    +    cause of 403 errors when making a websocket connection.
    +
    +    When using a secure websocket connection (``wss://``) with a self-signed
    +    certificate, the connection from a browser may fail because it wants
    +    to show the "accept this certificate" dialog but has nowhere to show it.
    +    You must first visit a regular HTML page using the same certificate
    +    to accept it before the websocket connection will succeed.
    +
    +    If the application setting ``websocket_ping_interval`` has a non-zero
    +    value, a ping will be sent periodically, and the connection will be
    +    closed if a response is not received before the ``websocket_ping_timeout``.
    +
    +    Messages larger than the ``websocket_max_message_size`` application setting
    +    (default 10MiB) will not be accepted.
    +
    +    .. versionchanged:: 4.5
    +       Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
    +       ``websocket_max_message_size``.
    +    """
    +    def __init__(self, application, request, **kwargs):
    +        super(WebSocketHandler, self).__init__(application, request, **kwargs)
    +        self.ws_connection = None
    +        self.close_code = None
    +        self.close_reason = None
    +        self.stream = None
    +        self._on_close_called = False
    +
    +    def get(self, *args, **kwargs):
    +        self.open_args = args
    +        self.open_kwargs = kwargs
    +
    +        # Upgrade header should be present and should be equal to WebSocket
    +        if self.request.headers.get("Upgrade", "").lower() != 'websocket':
    +            self.set_status(400)
    +            log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
    +            self.finish(log_msg)
    +            gen_log.debug(log_msg)
    +            return
    +
    +        # Connection header should be upgrade.
    +        # Some proxy servers/load balancers
    +        # might mess with it.
    +        headers = self.request.headers
    +        connection = map(lambda s: s.strip().lower(),
    +                         headers.get("Connection", "").split(","))
    +        if 'upgrade' not in connection:
    +            self.set_status(400)
    +            log_msg = "\"Connection\" must be \"Upgrade\"."
    +            self.finish(log_msg)
    +            gen_log.debug(log_msg)
    +            return
    +
    +        # Handle WebSocket Origin naming convention differences
    +        # The difference between version 8 and 13 is that in 8 the
    +        # client sends a "Sec-Websocket-Origin" header and in 13 it's
    +        # simply "Origin".
    +        if "Origin" in self.request.headers:
    +            origin = self.request.headers.get("Origin")
    +        else:
    +            origin = self.request.headers.get("Sec-Websocket-Origin", None)
    +
    +        # If there was an origin header, check to make sure it matches
    +        # according to check_origin. When the origin is None, we assume it
    +        # did not come from a browser and that it can be passed on.
    +        if origin is not None and not self.check_origin(origin):
    +            self.set_status(403)
    +            log_msg = "Cross origin websockets not allowed"
    +            self.finish(log_msg)
    +            gen_log.debug(log_msg)
    +            return
    +
    +        self.ws_connection = self.get_websocket_protocol()
    +        if self.ws_connection:
    +            self.ws_connection.accept_connection()
    +        else:
    +            self.set_status(426, "Upgrade Required")
    +            self.set_header("Sec-WebSocket-Version", "7, 8, 13")
    +            self.finish()
    +
    +    stream = None
    +
    +    @property
    +    def ping_interval(self):
    +        """The interval for websocket keep-alive pings.
    +
    +        Set websocket_ping_interval = 0 to disable pings.
    +        """
    +        return self.settings.get('websocket_ping_interval', None)
    +
    +    @property
    +    def ping_timeout(self):
    +        """If no ping is received in this many seconds,
    +        close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
    +        Default is max of 3 pings or 30 seconds.
    +        """
    +        return self.settings.get('websocket_ping_timeout', None)
    +
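+    # Editor's sketch: enabling keep-alive pings through the two settings
+    # consulted by the properties above (values illustrative):
+    #
+    #     app = tornado.web.Application(handlers,
+    #                                   websocket_ping_interval=10,
+    #                                   websocket_ping_timeout=30)
+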
    +    @property
    +    def max_message_size(self):
    +        """Maximum allowed message size.
    +
    +        If the remote peer sends a message larger than this, the connection
    +        will be closed.
    +
    +        Default is 10MiB.
    +        """
    +        return self.settings.get('websocket_max_message_size', _default_max_message_size)
    +
    +    def write_message(self, message, binary=False):
    +        """Sends the given message to the client of this Web Socket.
    +
    +        The message may be either a string or a dict (which will be
    +        encoded as json).  If the ``binary`` argument is false, the
    +        message will be sent as utf8; in binary mode any byte string
    +        is allowed.
    +
    +        If the connection is already closed, raises `WebSocketClosedError`.
    +        Returns a `.Future` which can be used for flow control.
    +
    +        .. versionchanged:: 3.2
    +           `WebSocketClosedError` was added (previously a closed connection
    +           would raise an `AttributeError`)
    +
    +        .. versionchanged:: 4.3
    +           Returns a `.Future` which can be used for flow control.
    +
    +        .. versionchanged:: 5.0
    +           Consistently raises `WebSocketClosedError`. Previously could
    +           sometimes raise `.StreamClosedError`.
    +        """
    +        if self.ws_connection is None:
    +            raise WebSocketClosedError()
    +        if isinstance(message, dict):
    +            message = tornado.escape.json_encode(message)
    +        return self.ws_connection.write_message(message, binary=binary)
    +
    +    def select_subprotocol(self, subprotocols):
    +        """Override to implement subprotocol negotiation.
    +
    +        ``subprotocols`` is a list of strings identifying the
    +        subprotocols proposed by the client.  This method may be
    +        overridden to return one of those strings to select it, or
    +        ``None`` to not select a subprotocol.
    +
    +        Failure to select a subprotocol does not automatically abort
    +        the connection, although clients may close the connection if
    +        none of their proposed subprotocols was selected.
    +
    +        The list may be empty, in which case this method must return
    +        None. This method is always called exactly once even if no
    +        subprotocols were proposed so that the handler can be advised
    +        of this fact.
    +
    +        .. versionchanged:: 5.1
    +
    +           Previously, this method was called with a list containing
    +           an empty string instead of an empty list if no subprotocols
    +           were proposed by the client.
    +        """
    +        return None
    +
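+    # Editor's sketch of a hypothetical ``select_subprotocol`` override that
+    # prefers a "chat" subprotocol when the client offers one:
+    #
+    #     def select_subprotocol(self, subprotocols):
+    #         if "chat" in subprotocols:
+    #             return "chat"
+    #         return None
+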
    +    @property
    +    def selected_subprotocol(self):
    +        """The subprotocol returned by `select_subprotocol`.
    +
    +        .. versionadded:: 5.1
    +        """
    +        return self.ws_connection.selected_subprotocol
    +
    +    def get_compression_options(self):
    +        """Override to return compression options for the connection.
    +
    +        If this method returns None (the default), compression will
    +        be disabled.  If it returns a dict (even an empty one), it
    +        will be enabled.  The contents of the dict may be used to
    +        control the following compression options:
    +
    +        ``compression_level`` specifies the compression level.
    +
    +        ``mem_level`` specifies the amount of memory used for the internal compression state.
    +
+        These parameters are documented in detail here:
+        https://docs.python.org/3.6/library/zlib.html#zlib.compressobj
    +
    +        .. versionadded:: 4.1
    +
    +        .. versionchanged:: 4.5
    +
    +           Added ``compression_level`` and ``mem_level``.
    +        """
    +        # TODO: Add wbits option.
    +        return None
    +
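+    # Editor's sketch of a hypothetical override enabling compression with
+    # the two documented options (values illustrative):
+    #
+    #     def get_compression_options(self):
+    #         return {"compression_level": 6, "mem_level": 5}
+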
    +    def open(self, *args, **kwargs):
    +        """Invoked when a new WebSocket is opened.
    +
    +        The arguments to `open` are extracted from the `tornado.web.URLSpec`
    +        regular expression, just like the arguments to
    +        `tornado.web.RequestHandler.get`.
    +
    +        `open` may be a coroutine. `on_message` will not be called until
    +        `open` has returned.
    +
    +        .. versionchanged:: 5.1
    +
    +           ``open`` may be a coroutine.
    +        """
    +        pass
    +
    +    def on_message(self, message):
    +        """Handle incoming messages on the WebSocket
    +
    +        This method must be overridden.
    +
    +        .. versionchanged:: 4.5
    +
    +           ``on_message`` can be a coroutine.
    +        """
    +        raise NotImplementedError
    +
    +    def ping(self, data=b''):
    +        """Send ping frame to the remote end.
    +
    +        The data argument allows a small amount of data (up to 125
    +        bytes) to be sent as a part of the ping message. Note that not
    +        all websocket implementations expose this data to
    +        applications.
    +
    +        Consider using the ``websocket_ping_interval`` application
    +        setting instead of sending pings manually.
    +
    +        .. versionchanged:: 5.1
    +
    +           The data argument is now optional.
    +
    +        """
    +        data = utf8(data)
    +        if self.ws_connection is None:
    +            raise WebSocketClosedError()
    +        self.ws_connection.write_ping(data)
    +
    +    def on_pong(self, data):
    +        """Invoked when the response to a ping frame is received."""
    +        pass
    +
    +    def on_ping(self, data):
    +        """Invoked when the a ping frame is received."""
    +        pass
    +
    +    def on_close(self):
    +        """Invoked when the WebSocket is closed.
    +
    +        If the connection was closed cleanly and a status code or reason
    +        phrase was supplied, these values will be available as the attributes
    +        ``self.close_code`` and ``self.close_reason``.
    +
    +        .. versionchanged:: 4.0
    +
    +           Added ``close_code`` and ``close_reason`` attributes.
    +        """
    +        pass
    +
    +    def close(self, code=None, reason=None):
    +        """Closes this Web Socket.
    +
    +        Once the close handshake is successful the socket will be closed.
    +
    +        ``code`` may be a numeric status code, taken from the values
+        defined in `RFC 6455 section 7.4.1
+        <https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
    +        ``reason`` may be a textual message about why the connection is
    +        closing.  These values are made available to the client, but are
    +        not otherwise interpreted by the websocket protocol.
    +
    +        .. versionchanged:: 4.0
    +
    +           Added the ``code`` and ``reason`` arguments.
    +        """
    +        if self.ws_connection:
    +            self.ws_connection.close(code, reason)
    +            self.ws_connection = None
    +
    +    def check_origin(self, origin):
    +        """Override to enable support for allowing alternate origins.
    +
    +        The ``origin`` argument is the value of the ``Origin`` HTTP
    +        header, the url responsible for initiating this request.  This
    +        method is not called for clients that do not send this header;
    +        such requests are always allowed (because all browsers that
    +        implement WebSockets support this header, and non-browser
    +        clients do not have the same cross-site security concerns).
    +
    +        Should return True to accept the request or False to reject it.
    +        By default, rejects all requests with an origin on a host other
    +        than this one.
    +
    +        This is a security protection against cross site scripting attacks on
    +        browsers, since WebSockets are allowed to bypass the usual same-origin
    +        policies and don't use CORS headers.
    +
    +        .. warning::
    +
    +           This is an important security measure; don't disable it
    +           without understanding the security implications. In
    +           particular, if your authentication is cookie-based, you
    +           must either restrict the origins allowed by
    +           ``check_origin()`` or implement your own XSRF-like
+           protection for websocket connections. See `these
+           <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
+           `articles
+           <https://devcenter.heroku.com/articles/websocket-security>`_
+           for more.
    +
    +        To accept all cross-origin traffic (which was the default prior to
    +        Tornado 4.0), simply override this method to always return true::
    +
    +            def check_origin(self, origin):
    +                return True
    +
    +        To allow connections from any subdomain of your site, you might
    +        do something like::
    +
    +            def check_origin(self, origin):
    +                parsed_origin = urllib.parse.urlparse(origin)
    +                return parsed_origin.netloc.endswith(".mydomain.com")
    +
    +        .. versionadded:: 4.0
    +
    +        """
    +        parsed_origin = urlparse(origin)
    +        origin = parsed_origin.netloc
    +        origin = origin.lower()
    +
    +        host = self.request.headers.get("Host")
    +
    +        # Check to see that origin matches host directly, including ports
    +        return origin == host
    +
    +    def set_nodelay(self, value):
    +        """Set the no-delay flag for this stream.
    +
    +        By default, small messages may be delayed and/or combined to minimize
    +        the number of packets sent.  This can sometimes cause 200-500ms delays
    +        due to the interaction between Nagle's algorithm and TCP delayed
    +        ACKs.  To reduce this delay (at the expense of possibly increasing
    +        bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
    +        connection is established.
    +
    +        See `.BaseIOStream.set_nodelay` for additional details.
    +
    +        .. versionadded:: 3.1
    +        """
    +        self.stream.set_nodelay(value)
    +
    +    def on_connection_close(self):
    +        if self.ws_connection:
    +            self.ws_connection.on_connection_close()
    +            self.ws_connection = None
    +        if not self._on_close_called:
    +            self._on_close_called = True
    +            self.on_close()
    +            self._break_cycles()
    +
    +    def _break_cycles(self):
    +        # WebSocketHandlers call finish() early, but we don't want to
    +        # break up reference cycles (which makes it impossible to call
    +        # self.render_string) until after we've really closed the
    +        # connection (if it was established in the first place,
    +        # indicated by status code 101).
    +        if self.get_status() != 101 or self._on_close_called:
    +            super(WebSocketHandler, self)._break_cycles()
    +
    +    def send_error(self, *args, **kwargs):
    +        if self.stream is None:
    +            super(WebSocketHandler, self).send_error(*args, **kwargs)
    +        else:
    +            # If we get an uncaught exception during the handshake,
    +            # we have no choice but to abruptly close the connection.
    +            # TODO: for uncaught exceptions after the handshake,
    +            # we can close the connection more gracefully.
    +            self.stream.close()
    +
    +    def get_websocket_protocol(self):
    +        websocket_version = self.request.headers.get("Sec-WebSocket-Version")
    +        if websocket_version in ("7", "8", "13"):
    +            return WebSocketProtocol13(
    +                self, compression_options=self.get_compression_options())
    +
    +    def _attach_stream(self):
    +        self.stream = self.detach()
    +        self.stream.set_close_callback(self.on_connection_close)
    +        # disable non-WS methods
    +        for method in ["write", "redirect", "set_header", "set_cookie",
    +                       "set_status", "flush", "finish"]:
    +            setattr(self, method, _raise_not_supported_for_websockets)
    +
    +
    +def _raise_not_supported_for_websockets(*args, **kwargs):
    +    raise RuntimeError("Method not supported for Web Sockets")
    +
    +
    +class WebSocketProtocol(object):
    +    """Base class for WebSocket protocol versions.
    +    """
    +    def __init__(self, handler):
    +        self.handler = handler
    +        self.request = handler.request
    +        self.stream = handler.stream
    +        self.client_terminated = False
    +        self.server_terminated = False
    +
    +    def _run_callback(self, callback, *args, **kwargs):
    +        """Runs the given callback with exception handling.
    +
    +        If the callback is a coroutine, returns its Future. On error, aborts the
    +        websocket connection and returns None.
    +        """
    +        try:
    +            result = callback(*args, **kwargs)
    +        except Exception:
    +            self.handler.log_exception(*sys.exc_info())
    +            self._abort()
    +        else:
    +            if result is not None:
    +                result = gen.convert_yielded(result)
    +                self.stream.io_loop.add_future(result, lambda f: f.result())
    +            return result
    +
    +    def on_connection_close(self):
    +        self._abort()
    +
    +    def _abort(self):
    +        """Instantly aborts the WebSocket connection by closing the socket"""
    +        self.client_terminated = True
    +        self.server_terminated = True
    +        self.stream.close()  # forcibly tear down the connection
    +        self.close()  # let the subclass cleanup
    +
    +
    +class _PerMessageDeflateCompressor(object):
    +    def __init__(self, persistent, max_wbits, compression_options=None):
    +        if max_wbits is None:
    +            max_wbits = zlib.MAX_WBITS
    +        # There is no symbolic constant for the minimum wbits value.
    +        if not (8 <= max_wbits <= zlib.MAX_WBITS):
    +            raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
    +                             max_wbits, zlib.MAX_WBITS)
    +        self._max_wbits = max_wbits
    +
    +        if compression_options is None or 'compression_level' not in compression_options:
    +            self._compression_level = tornado.web.GZipContentEncoding.GZIP_LEVEL
    +        else:
    +            self._compression_level = compression_options['compression_level']
    +
    +        if compression_options is None or 'mem_level' not in compression_options:
    +            self._mem_level = 8
    +        else:
    +            self._mem_level = compression_options['mem_level']
    +
    +        if persistent:
    +            self._compressor = self._create_compressor()
    +        else:
    +            self._compressor = None
    +
    +    def _create_compressor(self):
    +        return zlib.compressobj(self._compression_level,
    +                                zlib.DEFLATED, -self._max_wbits, self._mem_level)
    +
    +    def compress(self, data):
    +        compressor = self._compressor or self._create_compressor()
    +        data = (compressor.compress(data) +
    +                compressor.flush(zlib.Z_SYNC_FLUSH))
    +        assert data.endswith(b'\x00\x00\xff\xff')
    +        return data[:-4]
    +
    +
    +class _PerMessageDeflateDecompressor(object):
    +    def __init__(self, persistent, max_wbits, max_message_size, compression_options=None):
    +        self._max_message_size = max_message_size
    +        if max_wbits is None:
    +            max_wbits = zlib.MAX_WBITS
    +        if not (8 <= max_wbits <= zlib.MAX_WBITS):
    +            raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
    +                             max_wbits, zlib.MAX_WBITS)
    +        self._max_wbits = max_wbits
    +        if persistent:
    +            self._decompressor = self._create_decompressor()
    +        else:
    +            self._decompressor = None
    +
    +    def _create_decompressor(self):
    +        return zlib.decompressobj(-self._max_wbits)
    +
    +    def decompress(self, data):
    +        decompressor = self._decompressor or self._create_decompressor()
    +        result = decompressor.decompress(data + b'\x00\x00\xff\xff', self._max_message_size)
    +        if decompressor.unconsumed_tail:
    +            raise _DecompressTooLargeError()
    +        return result
    +
    +
    +class WebSocketProtocol13(WebSocketProtocol):
    +    """Implementation of the WebSocket protocol from RFC 6455.
    +
    +    This class supports versions 7 and 8 of the protocol in addition to the
    +    final version 13.
    +    """
    +    # Bit masks for the first byte of a frame.
    +    FIN = 0x80
    +    RSV1 = 0x40
    +    RSV2 = 0x20
    +    RSV3 = 0x10
    +    RSV_MASK = RSV1 | RSV2 | RSV3
    +    OPCODE_MASK = 0x0f
    +
    +    def __init__(self, handler, mask_outgoing=False,
    +                 compression_options=None):
    +        WebSocketProtocol.__init__(self, handler)
    +        self.mask_outgoing = mask_outgoing
    +        self._final_frame = False
    +        self._frame_opcode = None
    +        self._masked_frame = None
    +        self._frame_mask = None
    +        self._frame_length = None
    +        self._fragmented_message_buffer = None
    +        self._fragmented_message_opcode = None
    +        self._waiting = None
    +        self._compression_options = compression_options
    +        self._decompressor = None
    +        self._compressor = None
    +        self._frame_compressed = None
    +        # The total uncompressed size of all messages received or sent.
    +        # Unicode messages are encoded to utf8.
    +        # Only for testing; subject to change.
    +        self._message_bytes_in = 0
    +        self._message_bytes_out = 0
    +        # The total size of all packets received or sent.  Includes
    +        # the effect of compression, frame overhead, and control frames.
    +        self._wire_bytes_in = 0
    +        self._wire_bytes_out = 0
    +        self.ping_callback = None
    +        self.last_ping = 0
    +        self.last_pong = 0
    +
    +    def accept_connection(self):
    +        try:
    +            self._handle_websocket_headers()
    +        except ValueError:
    +            self.handler.set_status(400)
    +            log_msg = "Missing/Invalid WebSocket headers"
    +            self.handler.finish(log_msg)
    +            gen_log.debug(log_msg)
    +            return
    +
    +        try:
    +            self._accept_connection()
    +        except ValueError:
    +            gen_log.debug("Malformed WebSocket request received",
    +                          exc_info=True)
    +            self._abort()
    +            return
    +
    +    def _handle_websocket_headers(self):
    +        """Verifies all invariant- and required headers
    +
+        If a header is missing or has an incorrect value, a ``ValueError``
+        will be raised.
    +        """
    +        fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
    +        if not all(map(lambda f: self.request.headers.get(f), fields)):
    +            raise ValueError("Missing/Invalid WebSocket headers")
    +
    +    @staticmethod
    +    def compute_accept_value(key):
    +        """Computes the value for the Sec-WebSocket-Accept header,
    +        given the value for Sec-WebSocket-Key.
    +        """
    +        sha1 = hashlib.sha1()
    +        sha1.update(utf8(key))
    +        sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")  # Magic value
    +        return native_str(base64.b64encode(sha1.digest()))
    +
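+    # Editor's note: with the sample key from RFC 6455 section 1.3,
+    # ``compute_accept_value("dGhlIHNhbXBsZSBub25jZQ==")`` returns
+    # ``"s3pPLMBiTxaQ9kYGzzhZRbK+xOo="``.
+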
    +    def _challenge_response(self):
    +        return WebSocketProtocol13.compute_accept_value(
    +            self.request.headers.get("Sec-Websocket-Key"))
    +
    +    @gen.coroutine
    +    def _accept_connection(self):
    +        subprotocol_header = self.request.headers.get("Sec-WebSocket-Protocol")
    +        if subprotocol_header:
    +            subprotocols = [s.strip() for s in subprotocol_header.split(',')]
    +        else:
    +            subprotocols = []
    +        self.selected_subprotocol = self.handler.select_subprotocol(subprotocols)
    +        if self.selected_subprotocol:
    +            assert self.selected_subprotocol in subprotocols
    +            self.handler.set_header("Sec-WebSocket-Protocol", self.selected_subprotocol)
    +
    +        extensions = self._parse_extensions_header(self.request.headers)
    +        for ext in extensions:
    +            if (ext[0] == 'permessage-deflate' and
    +                    self._compression_options is not None):
    +                # TODO: negotiate parameters if compression_options
    +                # specifies limits.
    +                self._create_compressors('server', ext[1], self._compression_options)
    +                if ('client_max_window_bits' in ext[1] and
    +                        ext[1]['client_max_window_bits'] is None):
    +                    # Don't echo an offered client_max_window_bits
    +                    # parameter with no value.
    +                    del ext[1]['client_max_window_bits']
    +                self.handler.set_header("Sec-WebSocket-Extensions",
    +                                        httputil._encode_header(
    +                                            'permessage-deflate', ext[1]))
    +                break
    +
    +        self.handler.clear_header("Content-Type")
    +        self.handler.set_status(101)
    +        self.handler.set_header("Upgrade", "websocket")
    +        self.handler.set_header("Connection", "Upgrade")
    +        self.handler.set_header("Sec-WebSocket-Accept", self._challenge_response())
    +        self.handler.finish()
    +
    +        self.handler._attach_stream()
    +        self.stream = self.handler.stream
    +
    +        self.start_pinging()
    +        open_result = self._run_callback(self.handler.open, *self.handler.open_args,
    +                                         **self.handler.open_kwargs)
    +        if open_result is not None:
    +            yield open_result
    +        yield self._receive_frame_loop()
    +
    +    def _parse_extensions_header(self, headers):
    +        extensions = headers.get("Sec-WebSocket-Extensions", '')
    +        if extensions:
    +            return [httputil._parse_header(e.strip())
    +                    for e in extensions.split(',')]
    +        return []
    +
    +    def _process_server_headers(self, key, headers):
    +        """Process the headers sent by the server to this client connection.
    +
    +        'key' is the websocket handshake challenge/response key.
    +        """
    +        assert headers['Upgrade'].lower() == 'websocket'
    +        assert headers['Connection'].lower() == 'upgrade'
    +        accept = self.compute_accept_value(key)
    +        assert headers['Sec-Websocket-Accept'] == accept
    +
    +        extensions = self._parse_extensions_header(headers)
    +        for ext in extensions:
    +            if (ext[0] == 'permessage-deflate' and
    +                    self._compression_options is not None):
    +                self._create_compressors('client', ext[1])
    +            else:
    +                raise ValueError("unsupported extension %r", ext)
    +
    +        self.selected_subprotocol = headers.get('Sec-WebSocket-Protocol', None)
    +
    +    def _get_compressor_options(self, side, agreed_parameters, compression_options=None):
    +        """Converts a websocket agreed_parameters set to keyword arguments
    +        for our compressor objects.
    +        """
    +        options = dict(
    +            persistent=(side + '_no_context_takeover') not in agreed_parameters)
    +        wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
    +        if wbits_header is None:
    +            options['max_wbits'] = zlib.MAX_WBITS
    +        else:
    +            options['max_wbits'] = int(wbits_header)
    +        options['compression_options'] = compression_options
    +        return options
    +
    +    def _create_compressors(self, side, agreed_parameters, compression_options=None):
    +        # TODO: handle invalid parameters gracefully
    +        allowed_keys = set(['server_no_context_takeover',
    +                            'client_no_context_takeover',
    +                            'server_max_window_bits',
    +                            'client_max_window_bits'])
    +        for key in agreed_parameters:
    +            if key not in allowed_keys:
    +                raise ValueError("unsupported compression parameter %r" % key)
    +        other_side = 'client' if (side == 'server') else 'server'
    +        self._compressor = _PerMessageDeflateCompressor(
    +            **self._get_compressor_options(side, agreed_parameters, compression_options))
    +        self._decompressor = _PerMessageDeflateDecompressor(
    +            max_message_size=self.handler.max_message_size,
    +            **self._get_compressor_options(other_side, agreed_parameters, compression_options))
    +
    +    def _write_frame(self, fin, opcode, data, flags=0):
    +        data_len = len(data)
    +        if opcode & 0x8:
    +            # All control frames MUST have a payload length of 125
    +            # bytes or less and MUST NOT be fragmented.
    +            if not fin:
    +                raise ValueError("control frames may not be fragmented")
    +            if data_len > 125:
    +                raise ValueError("control frame payloads may not exceed 125 bytes")
    +        if fin:
    +            finbit = self.FIN
    +        else:
    +            finbit = 0
    +        frame = struct.pack("B", finbit | opcode | flags)
    +        if self.mask_outgoing:
    +            mask_bit = 0x80
    +        else:
    +            mask_bit = 0
    +        if data_len < 126:
    +            frame += struct.pack("B", data_len | mask_bit)
    +        elif data_len <= 0xFFFF:
    +            frame += struct.pack("!BH", 126 | mask_bit, data_len)
    +        else:
    +            frame += struct.pack("!BQ", 127 | mask_bit, data_len)
    +        if self.mask_outgoing:
    +            mask = os.urandom(4)
    +            data = mask + _websocket_mask(mask, data)
    +        frame += data
    +        self._wire_bytes_out += len(frame)
    +        return self.stream.write(frame)
    +
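+    # Editor's note, a worked example of the header logic above: an unmasked
+    # final text frame carrying b"Hi" is four bytes on the wire:
+    #
+    #     0x81        FIN | opcode 0x1 (text)
+    #     0x02        no mask bit, payload length 2
+    #     0x48 0x69   the payload, "Hi"
+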
    +    def write_message(self, message, binary=False):
    +        """Sends the given message to the client of this Web Socket."""
    +        if binary:
    +            opcode = 0x2
    +        else:
    +            opcode = 0x1
    +        message = tornado.escape.utf8(message)
    +        assert isinstance(message, bytes)
    +        self._message_bytes_out += len(message)
    +        flags = 0
    +        if self._compressor:
    +            message = self._compressor.compress(message)
    +            flags |= self.RSV1
+        # For historical reasons, write methods in Tornado operate in a
+        # semi-synchronous mode in which awaiting the Future they return is
+        # optional (but errors can still be raised). This requires us to go
+        # through an awkward dance here to transform the errors that may be
+        # returned while presenting the same semi-synchronous interface.
    +        try:
    +            fut = self._write_frame(True, opcode, message, flags=flags)
    +        except StreamClosedError:
    +            raise WebSocketClosedError()
    +
    +        @gen.coroutine
    +        def wrapper():
    +            try:
    +                yield fut
    +            except StreamClosedError:
    +                raise WebSocketClosedError()
    +        return wrapper()
    +
    +    def write_ping(self, data):
    +        """Send ping frame."""
    +        assert isinstance(data, bytes)
    +        self._write_frame(True, 0x9, data)
    +
    +    @gen.coroutine
    +    def _receive_frame_loop(self):
    +        try:
    +            while not self.client_terminated:
    +                yield self._receive_frame()
    +        except StreamClosedError:
    +            self._abort()
    +
    +    def _read_bytes(self, n):
    +        self._wire_bytes_in += n
    +        return self.stream.read_bytes(n)
    +
    +    @gen.coroutine
    +    def _receive_frame(self):
    +        # Read the frame header.
    +        data = yield self._read_bytes(2)
    +        header, mask_payloadlen = struct.unpack("BB", data)
    +        is_final_frame = header & self.FIN
    +        reserved_bits = header & self.RSV_MASK
    +        opcode = header & self.OPCODE_MASK
    +        opcode_is_control = opcode & 0x8
    +        if self._decompressor is not None and opcode != 0:
    +            # Compression flag is present in the first frame's header,
    +            # but we can't decompress until we have all the frames of
    +            # the message.
    +            self._frame_compressed = bool(reserved_bits & self.RSV1)
    +            reserved_bits &= ~self.RSV1
    +        if reserved_bits:
    +            # client is using as-yet-undefined extensions; abort
    +            self._abort()
    +            return
    +        is_masked = bool(mask_payloadlen & 0x80)
    +        payloadlen = mask_payloadlen & 0x7f
    +
    +        # Parse and validate the length.
    +        if opcode_is_control and payloadlen >= 126:
    +            # control frames must have payload < 126
    +            self._abort()
    +            return
    +        if payloadlen < 126:
    +            self._frame_length = payloadlen
    +        elif payloadlen == 126:
    +            data = yield self._read_bytes(2)
    +            payloadlen = struct.unpack("!H", data)[0]
    +        elif payloadlen == 127:
    +            data = yield self._read_bytes(8)
    +            payloadlen = struct.unpack("!Q", data)[0]
    +        new_len = payloadlen
    +        if self._fragmented_message_buffer is not None:
    +            new_len += len(self._fragmented_message_buffer)
    +        if new_len > self.handler.max_message_size:
    +            self.close(1009, "message too big")
    +            self._abort()
    +            return
    +
    +        # Read the payload, unmasking if necessary.
    +        if is_masked:
    +            self._frame_mask = yield self._read_bytes(4)
    +        data = yield self._read_bytes(payloadlen)
    +        if is_masked:
    +            data = _websocket_mask(self._frame_mask, data)
    +
    +        # Decide what to do with this frame.
    +        if opcode_is_control:
    +            # control frames may be interleaved with a series of fragmented
    +            # data frames, so control frames must not interact with
    +            # self._fragmented_*
    +            if not is_final_frame:
    +                # control frames must not be fragmented
    +                self._abort()
    +                return
    +        elif opcode == 0:  # continuation frame
    +            if self._fragmented_message_buffer is None:
    +                # nothing to continue
    +                self._abort()
    +                return
    +            self._fragmented_message_buffer += data
    +            if is_final_frame:
    +                opcode = self._fragmented_message_opcode
    +                data = self._fragmented_message_buffer
    +                self._fragmented_message_buffer = None
    +        else:  # start of new data message
    +            if self._fragmented_message_buffer is not None:
    +                # can't start new message until the old one is finished
    +                self._abort()
    +                return
    +            if not is_final_frame:
    +                self._fragmented_message_opcode = opcode
    +                self._fragmented_message_buffer = data
    +
    +        if is_final_frame:
    +            handled_future = self._handle_message(opcode, data)
    +            if handled_future is not None:
    +                yield handled_future
    +
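+    # The _websocket_mask helper used above applies the RFC 6455 XOR mask
+    # for client-to-server frames. A rough pure-Python equivalent of what it
+    # computes (the real helper may be a C speedup):
+    #     def mask_bytes(mask, data):
+    #         return bytes(b ^ mask[i % 4] for i, b in enumerate(data))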
    +    def _handle_message(self, opcode, data):
    +        """Execute on_message, returning its Future if it is a coroutine."""
    +        if self.client_terminated:
    +            return
    +
    +        if self._frame_compressed:
    +            try:
    +                data = self._decompressor.decompress(data)
    +            except _DecompressTooLargeError:
    +                self.close(1009, "message too big after decompression")
    +                self._abort()
    +                return
    +
    +        if opcode == 0x1:
    +            # UTF-8 data
    +            self._message_bytes_in += len(data)
    +            try:
    +                decoded = data.decode("utf-8")
    +            except UnicodeDecodeError:
    +                self._abort()
    +                return
    +            return self._run_callback(self.handler.on_message, decoded)
    +        elif opcode == 0x2:
    +            # Binary data
    +            self._message_bytes_in += len(data)
    +            return self._run_callback(self.handler.on_message, data)
    +        elif opcode == 0x8:
    +            # Close
    +            self.client_terminated = True
    +            if len(data) >= 2:
    +                self.handler.close_code = struct.unpack('>H', data[:2])[0]
    +            if len(data) > 2:
    +                self.handler.close_reason = to_unicode(data[2:])
    +            # Echo the received close code, if any (RFC 6455 section 5.5.1).
    +            self.close(self.handler.close_code)
    +        elif opcode == 0x9:
    +            # Ping
    +            try:
    +                self._write_frame(True, 0xA, data)
    +            except StreamClosedError:
    +                self._abort()
    +            self._run_callback(self.handler.on_ping, data)
    +        elif opcode == 0xA:
    +            # Pong
    +            self.last_pong = IOLoop.current().time()
    +            return self._run_callback(self.handler.on_pong, data)
    +        else:
    +            self._abort()
    +
    +    def close(self, code=None, reason=None):
    +        """Closes the WebSocket connection."""
    +        if not self.server_terminated:
    +            if not self.stream.closed():
    +                if code is None and reason is not None:
    +                    code = 1000  # "normal closure" status code
    +                if code is None:
    +                    close_data = b''
    +                else:
    +                    close_data = struct.pack('>H', code)
    +                if reason is not None:
    +                    close_data += utf8(reason)
    +                try:
    +                    self._write_frame(True, 0x8, close_data)
    +                except StreamClosedError:
    +                    self._abort()
    +            self.server_terminated = True
    +        if self.client_terminated:
    +            if self._waiting is not None:
    +                self.stream.io_loop.remove_timeout(self._waiting)
    +                self._waiting = None
    +            self.stream.close()
    +        elif self._waiting is None:
    +            # Give the client a few seconds to complete a clean shutdown,
    +            # otherwise just close the connection.
    +            self._waiting = self.stream.io_loop.add_timeout(
    +                self.stream.io_loop.time() + 5, self._abort)
    +
    +    @property
    +    def ping_interval(self):
    +        interval = self.handler.ping_interval
    +        if interval is not None:
    +            return interval
    +        return 0
    +
    +    @property
    +    def ping_timeout(self):
    +        timeout = self.handler.ping_timeout
    +        if timeout is not None:
    +            return timeout
    +        return max(3 * self.ping_interval, 30)
    +
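+    # Example: with handler.ping_interval == 30 and no explicit ping_timeout,
+    # the properties above give a 30-second ping cadence and a pong deadline
+    # of max(3 * 30, 30) == 90 seconds.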
    +    def start_pinging(self):
    +        """Start sending periodic pings to keep the connection alive"""
    +        if self.ping_interval > 0:
    +            self.last_ping = self.last_pong = IOLoop.current().time()
    +            self.ping_callback = PeriodicCallback(
    +                self.periodic_ping, self.ping_interval * 1000)
    +            self.ping_callback.start()
    +
    +    def periodic_ping(self):
    +        """Send a ping to keep the websocket alive
    +
    +        Called periodically if the websocket_ping_interval is set and non-zero.
    +        """
    +        if self.stream.closed() and self.ping_callback is not None:
    +            self.ping_callback.stop()
    +            return
    +
    +        # Check for timeout on pong. Make sure that we really have
    +        # sent a recent ping in case the machine with both server and
    +        # client has been suspended since the last ping.
    +        now = IOLoop.current().time()
    +        since_last_pong = now - self.last_pong
    +        since_last_ping = now - self.last_ping
    +        if (since_last_ping < 2 * self.ping_interval and
    +                since_last_pong > self.ping_timeout):
    +            self.close()
    +            return
    +
    +        self.write_ping(b'')
    +        self.last_ping = now
    +
    +
    +class WebSocketClientConnection(simple_httpclient._HTTPConnection):
    +    """WebSocket client connection.
    +
    +    This class should not be instantiated directly; use the
    +    `websocket_connect` function instead.
    +    """
+    def __init__(self, request, on_message_callback=None,
+                 compression_options=None, ping_interval=None, ping_timeout=None,
+                 max_message_size=None, subprotocols=None):
    +        self.compression_options = compression_options
    +        self.connect_future = Future()
    +        self.protocol = None
    +        self.read_queue = Queue(1)
    +        self.key = base64.b64encode(os.urandom(16))
    +        self._on_message_callback = on_message_callback
    +        self.close_code = self.close_reason = None
    +        self.ping_interval = ping_interval
    +        self.ping_timeout = ping_timeout
    +        self.max_message_size = max_message_size
    +
    +        scheme, sep, rest = request.url.partition(':')
    +        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
    +        request.url = scheme + sep + rest
    +        request.headers.update({
    +            'Upgrade': 'websocket',
    +            'Connection': 'Upgrade',
    +            'Sec-WebSocket-Key': self.key,
    +            'Sec-WebSocket-Version': '13',
    +        })
    +        if subprotocols is not None:
    +            request.headers['Sec-WebSocket-Protocol'] = ','.join(subprotocols)
    +        if self.compression_options is not None:
    +            # Always offer to let the server set our max_wbits (and even though
    +            # we don't offer it, we will accept a client_no_context_takeover
    +            # from the server).
    +            # TODO: set server parameters for deflate extension
    +            # if requested in self.compression_options.
    +            request.headers['Sec-WebSocket-Extensions'] = (
    +                'permessage-deflate; client_max_window_bits')
    +
    +        self.tcp_client = TCPClient()
    +        super(WebSocketClientConnection, self).__init__(
    +            None, request, lambda: None, self._on_http_response,
    +            104857600, self.tcp_client, 65536, 104857600)
    +
    +    def close(self, code=None, reason=None):
    +        """Closes the websocket connection.
    +
    +        ``code`` and ``reason`` are documented under
    +        `WebSocketHandler.close`.
    +
    +        .. versionadded:: 3.2
    +
    +        .. versionchanged:: 4.0
    +
    +           Added the ``code`` and ``reason`` arguments.
    +        """
    +        if self.protocol is not None:
    +            self.protocol.close(code, reason)
    +            self.protocol = None
    +
    +    def on_connection_close(self):
    +        if not self.connect_future.done():
    +            self.connect_future.set_exception(StreamClosedError())
    +        self.on_message(None)
    +        self.tcp_client.close()
    +        super(WebSocketClientConnection, self).on_connection_close()
    +
    +    def _on_http_response(self, response):
    +        if not self.connect_future.done():
    +            if response.error:
    +                self.connect_future.set_exception(response.error)
    +            else:
    +                self.connect_future.set_exception(WebSocketError(
    +                    "Non-websocket response"))
    +
    +    def headers_received(self, start_line, headers):
    +        if start_line.code != 101:
    +            return super(WebSocketClientConnection, self).headers_received(
    +                start_line, headers)
    +
    +        self.headers = headers
    +        self.protocol = self.get_websocket_protocol()
    +        self.protocol._process_server_headers(self.key, self.headers)
    +        self.protocol.start_pinging()
    +        IOLoop.current().add_callback(self.protocol._receive_frame_loop)
    +
    +        if self._timeout is not None:
    +            self.io_loop.remove_timeout(self._timeout)
    +            self._timeout = None
    +
    +        self.stream = self.connection.detach()
    +        self.stream.set_close_callback(self.on_connection_close)
    +        # Once we've taken over the connection, clear the final callback
    +        # we set on the http request.  This deactivates the error handling
    +        # in simple_httpclient that would otherwise interfere with our
    +        # ability to see exceptions.
    +        self.final_callback = None
    +
    +        future_set_result_unless_cancelled(self.connect_future, self)
    +
    +    def write_message(self, message, binary=False):
    +        """Sends a message to the WebSocket server.
    +
    +        If the stream is closed, raises `WebSocketClosedError`.
    +        Returns a `.Future` which can be used for flow control.
    +
    +        .. versionchanged:: 5.0
    +           Exception raised on a closed stream changed from `.StreamClosedError`
    +           to `WebSocketClosedError`.
    +        """
    +        return self.protocol.write_message(message, binary=binary)
    +
    +    def read_message(self, callback=None):
    +        """Reads a message from the WebSocket server.
    +
+        If on_message_callback was specified at WebSocket
+        initialization, this function will never return messages.
    +
    +        Returns a future whose result is the message, or None
    +        if the connection is closed.  If a callback argument
    +        is given it will be called with the future when it is
    +        ready.
    +        """
    +
    +        future = self.read_queue.get()
    +        if callback is not None:
    +            self.io_loop.add_future(future, callback)
    +        return future
    +
    +    def on_message(self, message):
    +        if self._on_message_callback:
    +            self._on_message_callback(message)
    +        else:
    +            return self.read_queue.put(message)
    +
    +    def ping(self, data=b''):
    +        """Send ping frame to the remote end.
    +
    +        The data argument allows a small amount of data (up to 125
    +        bytes) to be sent as a part of the ping message. Note that not
    +        all websocket implementations expose this data to
    +        applications.
    +
    +        Consider using the ``ping_interval`` argument to
    +        `websocket_connect` instead of sending pings manually.
    +
    +        .. versionadded:: 5.1
    +
    +        """
    +        data = utf8(data)
    +        if self.protocol is None:
    +            raise WebSocketClosedError()
    +        self.protocol.write_ping(data)
    +
    +    def on_pong(self, data):
    +        pass
    +
    +    def on_ping(self, data):
    +        pass
    +
    +    def get_websocket_protocol(self):
    +        return WebSocketProtocol13(self, mask_outgoing=True,
    +                                   compression_options=self.compression_options)
    +
    +    @property
    +    def selected_subprotocol(self):
    +        """The subprotocol selected by the server.
    +
    +        .. versionadded:: 5.1
    +        """
    +        return self.protocol.selected_subprotocol
    +
    +
    +def websocket_connect(url, callback=None, connect_timeout=None,
    +                      on_message_callback=None, compression_options=None,
    +                      ping_interval=None, ping_timeout=None,
    +                      max_message_size=_default_max_message_size, subprotocols=None):
    +    """Client-side websocket support.
    +
    +    Takes a url and returns a Future whose result is a
    +    `WebSocketClientConnection`.
    +
    +    ``compression_options`` is interpreted in the same way as the
    +    return value of `.WebSocketHandler.get_compression_options`.
    +
    +    The connection supports two styles of operation. In the coroutine
    +    style, the application typically calls
    +    `~.WebSocketClientConnection.read_message` in a loop::
    +
    +        conn = yield websocket_connect(url)
    +        while True:
    +            msg = yield conn.read_message()
    +            if msg is None: break
    +            # Do something with msg
    +
    +    In the callback style, pass an ``on_message_callback`` to
    +    ``websocket_connect``. In both styles, a message of ``None``
    +    indicates that the connection has been closed.
    +
    +    ``subprotocols`` may be a list of strings specifying proposed
    +    subprotocols. The selected protocol may be found on the
    +    ``selected_subprotocol`` attribute of the connection object
    +    when the connection is complete.
    +
    +    .. versionchanged:: 3.2
    +       Also accepts ``HTTPRequest`` objects in place of urls.
    +
    +    .. versionchanged:: 4.1
    +       Added ``compression_options`` and ``on_message_callback``.
    +
    +    .. versionchanged:: 4.5
    +       Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
    +       arguments, which have the same meaning as in `WebSocketHandler`.
    +
    +    .. versionchanged:: 5.0
    +       The ``io_loop`` argument (deprecated since version 4.1) has been removed.
    +
    +    .. versionchanged:: 5.1
    +       Added the ``subprotocols`` argument.
    +    """
    +    if isinstance(url, httpclient.HTTPRequest):
    +        assert connect_timeout is None
    +        request = url
    +        # Copy and convert the headers dict/object (see comments in
    +        # AsyncHTTPClient.fetch)
    +        request.headers = httputil.HTTPHeaders(request.headers)
    +    else:
    +        request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
    +    request = httpclient._RequestProxy(
    +        request, httpclient.HTTPRequest._DEFAULTS)
    +    conn = WebSocketClientConnection(request,
    +                                     on_message_callback=on_message_callback,
    +                                     compression_options=compression_options,
    +                                     ping_interval=ping_interval,
    +                                     ping_timeout=ping_timeout,
    +                                     max_message_size=max_message_size,
    +                                     subprotocols=subprotocols)
    +    if callback is not None:
    +        IOLoop.current().add_future(conn.connect_future, callback)
    +    return conn.connect_future
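+
+# A sketch of the callback style mentioned in the docstring above (the
+# handler name is illustrative):
+#
+#     def on_msg(msg):
+#         if msg is None:
+#             print("connection closed")
+#         else:
+#             print("received:", msg)
+#
+#     websocket_connect(url, on_message_callback=on_msg)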
    diff --git a/server/www/packages/packages-windows/x86/tornado/wsgi.py b/server/www/packages/packages-windows/x86/tornado/wsgi.py
    new file mode 100644
    index 0000000..e1230da
    --- /dev/null
    +++ b/server/www/packages/packages-windows/x86/tornado/wsgi.py
    @@ -0,0 +1,377 @@
    +#
    +# Copyright 2009 Facebook
    +#
    +# Licensed under the Apache License, Version 2.0 (the "License"); you may
    +# not use this file except in compliance with the License. You may obtain
    +# a copy of the License at
    +#
    +#     http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing, software
    +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    +# License for the specific language governing permissions and limitations
    +# under the License.
    +
    +"""WSGI support for the Tornado web framework.
    +
    +WSGI is the Python standard for web servers, and allows for interoperability
    +between Tornado and other Python web frameworks and servers.  This module
    +provides WSGI support in two ways:
    +
    +* `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application
    +  interface.  This is useful for running a Tornado app on another
    +  HTTP server, such as Google App Engine.  See the `WSGIAdapter` class
    +  documentation for limitations that apply.
    +* `WSGIContainer` lets you run other WSGI applications and frameworks on the
    +  Tornado HTTP server.  For example, with this class you can mix Django
    +  and Tornado handlers in a single server.
    +"""
    +
    +from __future__ import absolute_import, division, print_function
    +
    +import sys
    +from io import BytesIO
    +import tornado
    +import warnings
    +
    +from tornado.concurrent import Future
    +from tornado import escape
    +from tornado import httputil
    +from tornado.log import access_log
    +from tornado import web
    +from tornado.escape import native_str
    +from tornado.util import unicode_type, PY3
    +
    +
    +if PY3:
    +    import urllib.parse as urllib_parse  # py3
    +else:
    +    import urllib as urllib_parse
    +
    +# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
    +# that are smuggled inside objects of type unicode (via the latin1 encoding).
    +# These functions are like those in the tornado.escape module, but defined
    +# here to minimize the temptation to use them in non-wsgi contexts.
    +if str is unicode_type:
    +    def to_wsgi_str(s):
    +        assert isinstance(s, bytes)
    +        return s.decode('latin1')
    +
    +    def from_wsgi_str(s):
    +        assert isinstance(s, str)
    +        return s.encode('latin1')
    +else:
    +    def to_wsgi_str(s):
    +        assert isinstance(s, bytes)
    +        return s
    +
    +    def from_wsgi_str(s):
    +        assert isinstance(s, str)
    +        return s
    +
    +
    +class WSGIApplication(web.Application):
    +    """A WSGI equivalent of `tornado.web.Application`.
    +
    +    .. deprecated:: 4.0
    +
    +       Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
    +       This class will be removed in Tornado 6.0.
    +    """
    +    def __call__(self, environ, start_response):
    +        return WSGIAdapter(self)(environ, start_response)
    +
    +
    +# WSGI has no facilities for flow control, so just return an already-done
    +# Future when the interface requires it.
    +def _dummy_future():
    +    f = Future()
    +    f.set_result(None)
    +    return f
    +
    +
    +class _WSGIConnection(httputil.HTTPConnection):
    +    def __init__(self, method, start_response, context):
    +        self.method = method
    +        self.start_response = start_response
    +        self.context = context
    +        self._write_buffer = []
    +        self._finished = False
    +        self._expected_content_remaining = None
    +        self._error = None
    +
    +    def set_close_callback(self, callback):
    +        # WSGI has no facility for detecting a closed connection mid-request,
    +        # so we can simply ignore the callback.
    +        pass
    +
    +    def write_headers(self, start_line, headers, chunk=None, callback=None):
    +        if self.method == 'HEAD':
    +            self._expected_content_remaining = 0
    +        elif 'Content-Length' in headers:
    +            self._expected_content_remaining = int(headers['Content-Length'])
    +        else:
    +            self._expected_content_remaining = None
    +        self.start_response(
    +            '%s %s' % (start_line.code, start_line.reason),
    +            [(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
    +        if chunk is not None:
    +            self.write(chunk, callback)
    +        elif callback is not None:
    +            callback()
    +        return _dummy_future()
    +
    +    def write(self, chunk, callback=None):
    +        if self._expected_content_remaining is not None:
    +            self._expected_content_remaining -= len(chunk)
    +            if self._expected_content_remaining < 0:
    +                self._error = httputil.HTTPOutputError(
    +                    "Tried to write more data than Content-Length")
    +                raise self._error
    +        self._write_buffer.append(chunk)
    +        if callback is not None:
    +            callback()
    +        return _dummy_future()
    +
    +    def finish(self):
    +        if (self._expected_content_remaining is not None and
    +                self._expected_content_remaining != 0):
    +            self._error = httputil.HTTPOutputError(
    +                "Tried to write %d bytes less than Content-Length" %
    +                self._expected_content_remaining)
    +            raise self._error
    +        self._finished = True
    +
    +
    +class _WSGIRequestContext(object):
    +    def __init__(self, remote_ip, protocol):
    +        self.remote_ip = remote_ip
    +        self.protocol = protocol
    +
    +    def __str__(self):
    +        return self.remote_ip
    +
    +
    +class WSGIAdapter(object):
    +    """Converts a `tornado.web.Application` instance into a WSGI application.
    +
    +    Example usage::
    +
    +        import tornado.web
    +        import tornado.wsgi
    +        import wsgiref.simple_server
    +
    +        class MainHandler(tornado.web.RequestHandler):
    +            def get(self):
    +                self.write("Hello, world")
    +
    +        if __name__ == "__main__":
    +            application = tornado.web.Application([
    +                (r"/", MainHandler),
    +            ])
    +            wsgi_app = tornado.wsgi.WSGIAdapter(application)
    +            server = wsgiref.simple_server.make_server('', 8888, wsgi_app)
    +            server.serve_forever()
    +
+    See the `appengine demo
+    <https://github.com/tornadoweb/tornado/tree/stable/demos/appengine>`_
    +    for an example of using this module to run a Tornado app on Google
    +    App Engine.
    +
    +    In WSGI mode asynchronous methods are not supported.  This means
    +    that it is not possible to use `.AsyncHTTPClient`, or the
    +    `tornado.auth` or `tornado.websocket` modules.
    +
    +    In multithreaded WSGI servers on Python 3, it may be necessary to
    +    permit `asyncio` to create event loops on any thread. Run the
    +    following at startup (typically import time for WSGI
    +    applications)::
    +
    +        import asyncio
    +        from tornado.platform.asyncio import AnyThreadEventLoopPolicy
    +        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
    +
    +    .. versionadded:: 4.0
    +
    +    .. deprecated:: 5.1
    +
    +       This class is deprecated and will be removed in Tornado 6.0.
    +       Use Tornado's `.HTTPServer` instead of a WSGI container.
    +    """
    +    def __init__(self, application):
    +        warnings.warn("WSGIAdapter is deprecated, use Tornado's HTTPServer instead",
    +                      DeprecationWarning)
    +        if isinstance(application, WSGIApplication):
    +            self.application = lambda request: web.Application.__call__(
    +                application, request)
    +        else:
    +            self.application = application
    +
    +    def __call__(self, environ, start_response):
    +        method = environ["REQUEST_METHOD"]
    +        uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
    +        uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
    +        if environ.get("QUERY_STRING"):
    +            uri += "?" + environ["QUERY_STRING"]
    +        headers = httputil.HTTPHeaders()
    +        if environ.get("CONTENT_TYPE"):
    +            headers["Content-Type"] = environ["CONTENT_TYPE"]
    +        if environ.get("CONTENT_LENGTH"):
    +            headers["Content-Length"] = environ["CONTENT_LENGTH"]
    +        for key in environ:
    +            if key.startswith("HTTP_"):
    +                headers[key[5:].replace("_", "-")] = environ[key]
    +        if headers.get("Content-Length"):
    +            body = environ["wsgi.input"].read(
    +                int(headers["Content-Length"]))
    +        else:
    +            body = b""
    +        protocol = environ["wsgi.url_scheme"]
    +        remote_ip = environ.get("REMOTE_ADDR", "")
    +        if environ.get("HTTP_HOST"):
    +            host = environ["HTTP_HOST"]
    +        else:
    +            host = environ["SERVER_NAME"]
    +        connection = _WSGIConnection(method, start_response,
    +                                     _WSGIRequestContext(remote_ip, protocol))
    +        request = httputil.HTTPServerRequest(
    +            method, uri, "HTTP/1.1", headers=headers, body=body,
    +            host=host, connection=connection)
    +        request._parse_body()
    +        self.application(request)
    +        if connection._error:
    +            raise connection._error
    +        if not connection._finished:
    +            raise Exception("request did not finish synchronously")
    +        return connection._write_buffer
    +
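+    # Illustrative mapping: an environ with REQUEST_METHOD='GET',
+    # PATH_INFO='/foo', QUERY_STRING='a=1' and HTTP_X_CUSTOM_HEADER='v'
+    # becomes a request for uri '/foo?a=1' whose headers include
+    # X-Custom-Header (underscores in HTTP_* keys are turned into dashes
+    # above).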
    +
    +class WSGIContainer(object):
    +    r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.
    +
    +    .. warning::
    +
    +       WSGI is a *synchronous* interface, while Tornado's concurrency model
    +       is based on single-threaded asynchronous execution.  This means that
    +       running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
    +       than running the same app in a multi-threaded WSGI server like
    +       ``gunicorn`` or ``uwsgi``.  Use `WSGIContainer` only when there are
    +       benefits to combining Tornado and WSGI in the same process that
    +       outweigh the reduced scalability.
    +
    +    Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
    +    run it. For example::
    +
    +        def simple_app(environ, start_response):
    +            status = "200 OK"
    +            response_headers = [("Content-type", "text/plain")]
    +            start_response(status, response_headers)
    +            return ["Hello world!\n"]
    +
    +        container = tornado.wsgi.WSGIContainer(simple_app)
    +        http_server = tornado.httpserver.HTTPServer(container)
    +        http_server.listen(8888)
    +        tornado.ioloop.IOLoop.current().start()
    +
+    This class is intended to let other frameworks (Django, web.py, etc.)
    +    run on the Tornado HTTP server and I/O loop.
    +
    +    The `tornado.web.FallbackHandler` class is often useful for mixing
    +    Tornado and WSGI apps in the same server.  See
    +    https://github.com/bdarnell/django-tornado-demo for a complete example.
    +    """
    +    def __init__(self, wsgi_application):
    +        self.wsgi_application = wsgi_application
    +
    +    def __call__(self, request):
    +        data = {}
    +        response = []
    +
    +        def start_response(status, response_headers, exc_info=None):
    +            data["status"] = status
    +            data["headers"] = response_headers
    +            return response.append
    +        app_response = self.wsgi_application(
    +            WSGIContainer.environ(request), start_response)
    +        try:
    +            response.extend(app_response)
    +            body = b"".join(response)
    +        finally:
    +            if hasattr(app_response, "close"):
    +                app_response.close()
    +        if not data:
    +            raise Exception("WSGI app did not call start_response")
    +
    +        status_code, reason = data["status"].split(' ', 1)
    +        status_code = int(status_code)
    +        headers = data["headers"]
    +        header_set = set(k.lower() for (k, v) in headers)
    +        body = escape.utf8(body)
    +        if status_code != 304:
    +            if "content-length" not in header_set:
    +                headers.append(("Content-Length", str(len(body))))
    +            if "content-type" not in header_set:
    +                headers.append(("Content-Type", "text/html; charset=UTF-8"))
    +        if "server" not in header_set:
    +            headers.append(("Server", "TornadoServer/%s" % tornado.version))
    +
    +        start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
    +        header_obj = httputil.HTTPHeaders()
    +        for key, value in headers:
    +            header_obj.add(key, value)
    +        request.connection.write_headers(start_line, header_obj, chunk=body)
    +        request.connection.finish()
    +        self._log(status_code, request)
    +
    +    @staticmethod
    +    def environ(request):
    +        """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
    +        """
    +        hostport = request.host.split(":")
    +        if len(hostport) == 2:
    +            host = hostport[0]
    +            port = int(hostport[1])
    +        else:
    +            host = request.host
    +            port = 443 if request.protocol == "https" else 80
    +        environ = {
    +            "REQUEST_METHOD": request.method,
    +            "SCRIPT_NAME": "",
    +            "PATH_INFO": to_wsgi_str(escape.url_unescape(
    +                request.path, encoding=None, plus=False)),
    +            "QUERY_STRING": request.query,
    +            "REMOTE_ADDR": request.remote_ip,
    +            "SERVER_NAME": host,
    +            "SERVER_PORT": str(port),
    +            "SERVER_PROTOCOL": request.version,
    +            "wsgi.version": (1, 0),
    +            "wsgi.url_scheme": request.protocol,
    +            "wsgi.input": BytesIO(escape.utf8(request.body)),
    +            "wsgi.errors": sys.stderr,
    +            "wsgi.multithread": False,
    +            "wsgi.multiprocess": True,
    +            "wsgi.run_once": False,
    +        }
    +        if "Content-Type" in request.headers:
    +            environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
    +        if "Content-Length" in request.headers:
    +            environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
    +        for key, value in request.headers.items():
    +            environ["HTTP_" + key.replace("-", "_").upper()] = value
    +        return environ
    +
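+    # For example, environ() turns a GET request for '/foo?a=1' addressed to
+    # host 'example.com' over plain HTTP into (abridged):
+    #     {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/foo',
+    #      'QUERY_STRING': 'a=1', 'SERVER_NAME': 'example.com',
+    #      'SERVER_PORT': '80', 'wsgi.url_scheme': 'http', ...}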
    +    def _log(self, status_code, request):
    +        if status_code < 400:
    +            log_method = access_log.info
    +        elif status_code < 500:
    +            log_method = access_log.warning
    +        else:
    +            log_method = access_log.error
    +        request_time = 1000.0 * request.request_time()
    +        summary = request.method + " " + request.uri + " (" + \
    +            request.remote_ip + ")"
    +        log_method("%d %s %.2fms", status_code, summary, request_time)
    +
    +
    +HTTPRequest = httputil.HTTPServerRequest
    diff --git a/server/www/packages/packages-windows/x86/wheezy/captcha/__init__.py b/server/www/packages/packages-windows/x86/wheezy/captcha/__init__.py
    new file mode 100644
    index 0000000..033e12e
    --- /dev/null
    +++ b/server/www/packages/packages-windows/x86/wheezy/captcha/__init__.py
    @@ -0,0 +1,5 @@
    +
    +"""
    +"""
    +
    +__version__ = '0.1.44'
    diff --git a/server/www/packages/packages-windows/x86/wheezy/captcha/bezier.py b/server/www/packages/packages-windows/x86/wheezy/captcha/bezier.py
    new file mode 100644
    index 0000000..48fd137
    --- /dev/null
    +++ b/server/www/packages/packages-windows/x86/wheezy/captcha/bezier.py
    @@ -0,0 +1,43 @@
    +
    +"""
    +"""
    +
    +
    +tsequence = tuple([t / 20.0 for t in range(21)])
    +beziers = {}
    +
    +
    +def pascal_row(n):
    +    """ Returns n-th row of Pascal's triangle
    +    """
    +    result = [1]
    +    x, numerator = 1, n
    +    for denominator in range(1, n // 2 + 1):
    +        x *= numerator
    +        x /= denominator
    +        result.append(x)
    +        numerator -= 1
    +    if n & 1 == 0:
    +        result.extend(reversed(result[:-1]))
    +    else:
    +        result.extend(reversed(result))
    +    return result
    +
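+# For instance, pascal_row(2) evaluates to [1, 2, 1] and pascal_row(4) to
+# [1, 4, 6, 4, 1] (interior terms may come out as floats under Python 3
+# because of the true division in the loop above).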
    +
    +def make_bezier(n):
    +    """ Bezier curves:
    +        http://en.wikipedia.org/wiki/B%C3%A9zier_curve#Generalization
    +    """
    +    try:
    +        return beziers[n]
    +    except KeyError:
    +        combinations = pascal_row(n - 1)
    +        result = []
    +        for t in tsequence:
    +            tpowers = (t ** i for i in range(n))
    +            upowers = ((1 - t) ** i for i in range(n - 1, -1, -1))
    +            coefs = [c * a * b for c, a, b in zip(combinations,
    +                                                  tpowers, upowers)]
    +            result.append(coefs)
    +        beziers[n] = result
    +        return result
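+
+# make_bezier(n) memoizes its result in `beziers`. Each of the 21 rows it
+# returns holds the n Bernstein coefficients for one sample point t in
+# tsequence; every row sums to 1, being the expansion of
+# ((1 - t) + t) ** (n - 1).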
    diff --git a/server/www/packages/packages-windows/x86/wheezy/captcha/comp.py b/server/www/packages/packages-windows/x86/wheezy/captcha/comp.py
    new file mode 100644
    index 0000000..28ccd97
    --- /dev/null
    +++ b/server/www/packages/packages-windows/x86/wheezy/captcha/comp.py
    @@ -0,0 +1,26 @@
    +""" ``comp`` module.
    +"""
    +
    +import sys
    +
    +
    +PY3 = sys.version_info[0] >= 3
    +
    +
    +if PY3:  # pragma: nocover
    +    xrange = range
    +else:  # pragma: nocover
    +    xrange = xrange
    +
    +try:  # pragma: nocover
    +    from PIL import Image
    +    from PIL import ImageFilter
    +    from PIL.ImageColor import getrgb
    +    from PIL.ImageDraw import Draw
    +    from PIL.ImageFont import truetype
    +except ImportError:  # pragma: nocover
    +    import Image  # noqa
    +    import ImageFilter  # noqa
    +    from ImageColor import getrgb  # noqa
    +    from ImageDraw import Draw  # noqa
    +    from ImageFont import truetype  # noqa
    diff --git a/server/www/packages/packages-windows/x86/wheezy/captcha/http.py b/server/www/packages/packages-windows/x86/wheezy/captcha/http.py
    new file mode 100644
    index 0000000..ab56937
    --- /dev/null
    +++ b/server/www/packages/packages-windows/x86/wheezy/captcha/http.py
    @@ -0,0 +1,126 @@
    +
    +"""
    +"""
    +
    +import random
    +
    +from datetime import timedelta
    +from time import time
    +from uuid import uuid4
    +
    +from wheezy.core.collections import last_item_adapter
    +from wheezy.core.uuid import shrink_uuid
    +from wheezy.http import CacheProfile
    +from wheezy.http import HTTPResponse
    +from wheezy.http import accept_method
    +from wheezy.http import bad_request
    +from wheezy.http import response_cache
    +
    +
    +class FileAdapter(object):
    +
    +    def __init__(self, response):
    +        self.write = response.write_bytes
    +
    +
    +class CaptchaContext(object):
    +
    +    def __init__(self, image,
    +                 cache, prefix='captcha:', namespace=None,
    +                 timeout=5 * 60, profile=None,
    +                 chars='ABCDEFGHJKLMNPQRSTUVWXYZ23456789',
    +                 max_chars=4, wait_timeout=2,
    +                 challenge_key='c', turing_key='turing_number',
    +                 enabled=True):
    +        self.image = image
    +        self.cache = cache
    +        self.prefix = prefix
    +        self.namespace = namespace
    +        self.timeout = timeout
    +        self.chars = chars
    +        self.wait_timeout = wait_timeout
    +        self.max_chars = max_chars
    +        self.challenge_key = challenge_key
    +        self.turing_key = turing_key
    +        self.enabled = enabled
    +        if profile:
    +            self.profile = profile
    +        else:
    +            self.profile = CacheProfile(
    +                'server',
    +                vary_query=[challenge_key],
    +                duration=timedelta(seconds=wait_timeout),
    +                no_store=True,
    +                namespace=namespace)
    +
    +    def create_handler(self, content_type='image/jpg', format='JPEG',
    +                       **options):
    +        @accept_method('GET')
    +        @response_cache(self.profile)
    +        def handler(request):
    +            if self.challenge_key not in request.query:
    +                return bad_request()
    +            challenge_code = last_item_adapter(
    +                request.query)[self.challenge_key]
    +            turing_number = ''.join(random.sample(self.chars, self.max_chars))
    +            if not self.cache.set(self.prefix + challenge_code,
    +                                  (int(time()), turing_number),
    +                                  self.timeout, self.namespace):
    +                return bad_request()
    +            response = HTTPResponse(content_type)
    +            self.image(turing_number).save(
    +                FileAdapter(response), format, **options)
    +            return response
    +        return handler
    +
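+    # The handler produced above serves GET requests of the form
+    # ?c=<challenge code>: it samples a fresh turing number, caches
+    # (issue time, turing number) under prefix + challenge code for
+    # `timeout` seconds, and responds with the rendered image.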
    +    def get_challenge_code(self, request):
    +        if self.challenge_key not in request.query:
    +            return shrink_uuid(uuid4())
    +        else:
    +            return request.query[self.challenge_key][0]
    +
    +    def validate(self, request, errors, gettext):
    +        if not self.enabled:
    +            return True
    +        if self.challenge_key not in request.form:
    +            self.append_error(errors, gettext(
    +                'The challenge code is not available.'))
    +            return False
    +        if self.turing_key not in request.form:
    +            self.append_error(errors, gettext(
    +                'The turing number is not available.'))
    +            return False
    +        form = last_item_adapter(request.form)
    +        challenge_code = form[self.challenge_key]
    +        if len(challenge_code) != 22:
    +            self.append_error(errors, gettext(
    +                'The challenge code is invalid.'))
    +            return False
    +        entered_turing_number = form[self.turing_key]
    +        if len(entered_turing_number) != self.max_chars:
    +            self.append_error(errors, gettext(
    +                'The turing number is invalid.'))
    +            return False
    +
    +        key = self.prefix + challenge_code
    +        data = self.cache.get(key, self.namespace)
    +        if not data:
    +            self.append_error(errors, gettext(
    +                'The code you typed has expired after %d seconds.')
    +                % self.timeout)
    +            return False
    +        self.cache.delete(key, 0, self.namespace)
    +        issued, turing_number = data
    +        if issued + self.wait_timeout > int(time()):
    +            self.append_error(errors, gettext(
    +                'The code was typed too quickly. Wait at least %d seconds.')
    +                % self.wait_timeout)
    +            return False
    +        if turing_number != entered_turing_number.upper():
    +            self.append_error(
    +                errors, gettext('The code you typed has no match.'))
    +            return False
    +        return True
    +
    +    def append_error(self, errors, message):
    +        errors.setdefault(self.turing_key, []).append(message)
    diff --git a/server/www/packages/packages-windows/x86/wheezy/captcha/image.py b/server/www/packages/packages-windows/x86/wheezy/captcha/image.py
    new file mode 100644
    index 0000000..626dd72
    --- /dev/null
    +++ b/server/www/packages/packages-windows/x86/wheezy/captcha/image.py
    @@ -0,0 +1,186 @@
    +"""
    +"""
    +
    +import random
    +
    +from wheezy.captcha.comp import Draw
    +from wheezy.captcha.comp import Image
    +from wheezy.captcha.comp import ImageFilter
    +from wheezy.captcha.comp import getrgb
    +from wheezy.captcha.comp import truetype
    +from wheezy.captcha.comp import xrange
    +
    +
    +def captcha(drawings, width=200, height=75):
    +    def render(text):
    +        image = Image.new('RGB', (width, height), (255, 255, 255))
    +        for drawing in drawings:
    +            image = drawing(image, text)
    +            assert image
    +        return image
    +    return render
    +
    +
    +# region: captcha drawers
    +
    +def background(color='#EEEECC'):
    +    color = getrgb(color)
    +
    +    def drawer(image, text):
    +        Draw(image).rectangle([(0, 0), image.size], fill=color)
    +        return image
    +    return drawer
    +
    +
    +def smooth():
    +    def drawer(image, text):
    +        return image.filter(ImageFilter.SMOOTH)
    +    return drawer
    +
    +
    +def curve(color='#5C87B2', width=4, number=6):
    +    from wheezy.captcha.bezier import make_bezier
    +    if not callable(color):
    +        c = getrgb(color)
    +
    +        def color():
    +            return c
    +
    +    def drawer(image, text):
    +        dx, height = image.size
    +        dx = dx / number
    +        path = [(dx * i, random.randint(0, height))
    +                for i in range(1, number)]
    +        bcoefs = make_bezier(number - 1)
    +        points = []
    +        for coefs in bcoefs:
    +            points.append(tuple(sum([coef * p for coef, p in zip(coefs, ps)])
    +                          for ps in zip(*path)))
    +        draw = Draw(image)
    +        draw.line(points, fill=color(), width=width)
    +        return image
    +    return drawer
    +
    +
    +def noise(number=50, color='#EEEECC', level=2):
    +    if not callable(color):
    +        c = getrgb(color)
    +
    +        def color():
    +            return c
    +
    +    def drawer(image, text):
    +        width, height = image.size
    +        dx = width / 10
    +        width = width - dx
    +        dy = height / 10
    +        height = height - dy
    +        draw = Draw(image)
    +        for i in xrange(number):
    +            x = int(random.uniform(dx, width))
    +            y = int(random.uniform(dy, height))
    +            draw.line(((x, y), (x + level, y)), fill=color(), width=level)
    +        return image
    +    return drawer
    +
    +
    +def text(fonts, font_sizes=None, drawings=None, color='#5C87B2',
    +         squeeze_factor=0.8):
    +    fonts = tuple([truetype(name, size)
    +                   for name in fonts
    +                   for size in font_sizes or (65, 70, 75)])
    +    if not callable(color):
    +        c = getrgb(color)
    +
    +        def color():
    +            return c
    +
    +    def drawer(image, text):
    +        draw = Draw(image)
    +        char_images = []
    +        for c in text:
    +            font = random.choice(fonts)
    +            c_width, c_height = draw.textsize(c, font=font)
    +            char_image = Image.new('RGB', (c_width, c_height), (0, 0, 0))
    +            char_draw = Draw(char_image)
    +            char_draw.text((0, 0), c, font=font, fill=color())
    +            char_image = char_image.crop(char_image.getbbox())
    +            for drawing in drawings:
    +                char_image = drawing(char_image)
    +            char_images.append(char_image)
    +        width, height = image.size
    +        offset = int((width - sum(int(i.size[0] * squeeze_factor)
    +                                  for i in char_images[:-1]) -
    +                      char_images[-1].size[0]) / 2)
    +        for char_image in char_images:
    +            c_width, c_height = char_image.size
    +            mask = char_image.convert('L').point(lambda i: i * 1.97)
    +            image.paste(char_image,
    +                        (offset, int((height - c_height) / 2)),
    +                        mask)
    +            offset += int(c_width * squeeze_factor)
    +        return image
    +    return drawer
    +
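+# Layout note for text() above: squeeze_factor < 1 makes consecutive glyphs
+# overlap. With two 50px-wide glyphs and squeeze_factor=0.8, the second is
+# pasted 40px after the first, so the pair spans 90px and is centered by the
+# offset computation.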
    +
    +# region: text drawers
    +
    +def warp(dx_factor=0.27, dy_factor=0.21):
    +    def drawer(image):
    +        width, height = image.size
    +        dx = width * dx_factor
    +        dy = height * dy_factor
    +        x1 = int(random.uniform(-dx, dx))
    +        y1 = int(random.uniform(-dy, dy))
    +        x2 = int(random.uniform(-dx, dx))
    +        y2 = int(random.uniform(-dy, dy))
    +        image2 = Image.new('RGB',
    +                           (width + abs(x1) + abs(x2),
    +                            height + abs(y1) + abs(y2)))
    +        image2.paste(image, (abs(x1), abs(y1)))
    +        width2, height2 = image2.size
    +        return image2.transform(
    +            (width, height), Image.QUAD,
    +            (x1, y1,
    +             -x1, height2 - y2,
    +             width2 + x2, height2 + y2,
    +             width2 - x2, -y1))
    +    return drawer
    +
    +
    +def offset(dx_factor=0.1, dy_factor=0.2):
    +    def drawer(image):
    +        width, height = image.size
    +        dx = int(random.random() * width * dx_factor)
    +        dy = int(random.random() * height * dy_factor)
    +        image2 = Image.new('RGB', (width + dx, height + dy))
    +        image2.paste(image, (dx, dy))
    +        return image2
    +    return drawer
    +
    +
    +def rotate(angle=25):
    +    def drawer(image):
    +        return image.rotate(
    +            random.uniform(-angle, angle), Image.BILINEAR, expand=1)
    +    return drawer
    +
    +
    +if __name__ == '__main__':
    +    import string
    +    captcha_image = captcha(drawings=[
    +        background(),
    +        text(fonts=[
    +            'fonts/CourierNew-Bold.ttf',
    +            'fonts/LiberationMono-Bold.ttf'],
    +            drawings=[
    +                warp(),
    +                rotate(),
    +                offset()
    +            ]),
    +        curve(),
    +        noise(),
    +        smooth()
    +    ])
+    image = captcha_image(
+        random.sample(string.ascii_uppercase + string.digits, 4))
    +    image.save('sample.jpg', 'JPEG', quality=75)
    diff --git a/server/www/packages/packages-windows/x86/wheezy/captcha/mixin.py b/server/www/packages/packages-windows/x86/wheezy/captcha/mixin.py
    new file mode 100644
    index 0000000..1c95d42
    --- /dev/null
    +++ b/server/www/packages/packages-windows/x86/wheezy/captcha/mixin.py
    @@ -0,0 +1,24 @@
    +
    +"""
    +"""
    +
    +from wheezy.core.descriptors import attribute
    +
    +
    +class CaptchaMixin(object):
    +
    +    @attribute
    +    def challenge_code(self):
    +        return self.captcha_context.get_challenge_code(self.request)
    +
    +    def validate_captcha(self):
    +        return self.captcha_context.validate(self.request,
    +                                             self.errors, self._)
    +
    +    def captcha_widget(self, path):
    +        ctx = self.captcha_context
+        # An <img> that requests the captcha image, plus a hidden input that
+        # carries the challenge code (the exact markup attributes here are
+        # an assumption).
+        return ('<img src="%s?%s=%s" alt="captcha" title="%s" />'
+                '<input name="%s" type="hidden" value="%s" />' %
    +                (path, ctx.challenge_key, self.challenge_code,
    +                 self._('If you cannot read, click to generate a new one.'),
    +                 ctx.challenge_key, self.challenge_code))